..; \
$(TRUNK)\inc; \
$(TRUNK)\inc\kernel;
-
+ 
C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS -D__LITTLE_ENDIAN

TARGETLIBS= \
struct ib_fmr_attr {
int max_pages;
int max_maps;
- u8 page_size;
+ u8 page_shift;
};
struct ib_ucontext {
* ibv_create_srq - Creates a SRQ associated with the specified protection
* domain.
* @pd: The protection domain associated with the SRQ.
- * @srq_init_attr: A list of initial attributes required to create the SRQ.
+ * @srq_init_attr: A list of initial attributes required to create the
+ * SRQ. If SRQ creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created SRQ.
*
* srq_attr->max_wr and srq_attr->max_sge are read to determine the
* requested size of the SRQ, and set to the actual values allocated
* ibv_create_qp - Creates a QP associated with the specified protection
* domain.
* @pd: The protection domain associated with the QP.
- * @qp_init_attr: A list of initial attributes required to create the QP.
+ * @qp_init_attr: A list of initial attributes required to create the
+ * QP. If QP creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created QP.
* @context: user process context (for application calls only)
* @p_umv_buf: parameters structure (for application calls only)
*/
--- /dev/null
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: fmr_pool.c 5274 2006-02-02 18:46:54Z roland $
+ */
+
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/jhash.h>
+#include <linux/kthread.h>
+
+#include <rdma/ib_fmr_pool.h>
+
+#include "core_priv.h"
+
+enum {
+ IB_FMR_MAX_REMAPS = 32,
+
+ IB_FMR_HASH_BITS = 8,
+ IB_FMR_HASH_SIZE = 1 << IB_FMR_HASH_BITS,
+ IB_FMR_HASH_MASK = IB_FMR_HASH_SIZE - 1
+};
+
+/*
+ * If an FMR is not in use, then the list member will point to either
+ * its pool's free_list (if the FMR can be mapped again; that is,
+ * remap_count < IB_FMR_MAX_REMAPS) or its pool's dirty_list (if the
+ * FMR needs to be unmapped before being remapped). In either of
+ * these cases it is a bug if the ref_count is not 0. In other words,
+ * if ref_count is > 0, then the list member must not be linked into
+ * either free_list or dirty_list.
+ *
+ * The cache_node member is used to link the FMR into a cache bucket
+ * (if caching is enabled). This is independent of the reference
+ * count of the FMR. When a valid FMR is released, its ref_count is
+ * decremented, and if ref_count reaches 0, the FMR is placed in
+ * either free_list or dirty_list as appropriate. However, it is not
+ * removed from the cache and may be "revived" if a call to
+ * ib_fmr_register_physical() occurs before the FMR is remapped. In
+ * this case we just increment the ref_count and remove the FMR from
+ * free_list/dirty_list.
+ *
+ * Before we remap an FMR from free_list, we remove it from the cache
+ * (to prevent another user from obtaining a stale FMR). When an FMR
+ * is released, we add it to the tail of the free list, so that our
+ * cache eviction policy is "least recently used."
+ *
+ * All manipulation of ref_count, list and cache_node is protected by
+ * pool_lock to maintain consistency.
+ */
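+
+/*
+ * A sketch of the release rule these invariants imply (see
+ * ib_fmr_pool_unmap() below for the real code, which also handles
+ * the dirty watermark and thread wakeup):
+ *
+ *	if (--fmr->ref_count == 0) {
+ *		if (fmr->remap_count < IB_FMR_MAX_REMAPS)
+ *			list_add_tail(&fmr->list, &pool->free_list);
+ *		else
+ *			list_add_tail(&fmr->list, &pool->dirty_list);
+ *	}
+ */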
+
+struct ib_fmr_pool {
+ spinlock_t pool_lock;
+
+ int pool_size;
+ int max_pages;
+ int dirty_watermark;
+ int dirty_len;
+ struct list_head free_list;
+ struct list_head dirty_list;
+ struct hlist_head *cache_bucket;
+
+ void (*flush_function)(struct ib_fmr_pool *pool,
+ void *arg);
+ void *flush_arg;
+
+ struct task_struct *thread;
+
+ atomic_t req_ser;
+ atomic_t flush_ser;
+
+ wait_queue_head_t force_wait;
+};
+
+static inline u32 ib_fmr_hash(u64 first_page)
+{
+ return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
+ (IB_FMR_HASH_SIZE - 1);
+}
+
+/* Caller must hold pool_lock */
+static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
+ u64 *page_list,
+ int page_list_len,
+ u64 io_virtual_address)
+{
+ struct hlist_head *bucket;
+ struct ib_pool_fmr *fmr;
+ struct hlist_node *pos;
+
+ if (!pool->cache_bucket)
+ return NULL;
+
+ bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
+
+ hlist_for_each_entry(fmr, pos, bucket, cache_node)
+ if (io_virtual_address == fmr->io_virtual_address &&
+ page_list_len == fmr->page_list_len &&
+ !memcmp(page_list, fmr->page_list,
+ page_list_len * sizeof *page_list))
+ return fmr;
+
+ return NULL;
+}
+
+static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
+{
+ int ret;
+ struct ib_pool_fmr *fmr;
+ LIST_HEAD(unmap_list);
+ LIST_HEAD(fmr_list);
+
+ spin_lock_irq(&pool->pool_lock);
+
+ list_for_each_entry(fmr, &pool->dirty_list, list) {
+ hlist_del_init(&fmr->cache_node);
+ fmr->remap_count = 0;
+ list_add_tail(&fmr->fmr->list, &fmr_list);
+
+#ifdef DEBUG
+ if (fmr->ref_count != 0) {
+ printk(KERN_WARNING "Unmapping FMR %p with ref count %d",
+ fmr, fmr->ref_count);
+ }
+#endif
+ }
+
+ list_splice(&pool->dirty_list, &unmap_list);
+ INIT_LIST_HEAD(&pool->dirty_list);
+ pool->dirty_len = 0;
+
+ spin_unlock_irq(&pool->pool_lock);
+
+ if (list_empty(&unmap_list))
+ return;
+
+ ret = ib_unmap_fmr(&fmr_list);
+ if (ret)
+ printk(KERN_WARNING "ib_unmap_fmr returned %d", ret);
+
+ spin_lock_irq(&pool->pool_lock);
+ list_splice(&unmap_list, &pool->free_list);
+ spin_unlock_irq(&pool->pool_lock);
+}
+
+static int ib_fmr_cleanup_thread(void *pool_ptr)
+{
+ struct ib_fmr_pool *pool = pool_ptr;
+
+ do {
+ if (pool->dirty_len >= pool->dirty_watermark ||
+ atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
+ ib_fmr_batch_release(pool);
+
+ atomic_inc(&pool->flush_ser);
+ wake_up_interruptible(&pool->force_wait);
+
+ if (pool->flush_function)
+ pool->flush_function(pool, pool->flush_arg);
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (pool->dirty_len < pool->dirty_watermark &&
+ atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
+ !kthread_should_stop())
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+/**
+ * ib_create_fmr_pool - Create an FMR pool
+ * @pd:Protection domain for FMRs
+ * @params:FMR pool parameters
+ *
+ * Create a pool of FMRs. Return value is pointer to new pool or
+ * error code if creation failed.
+ */
+struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
+ struct ib_fmr_pool_param *params)
+{
+ struct ib_device *device;
+ struct ib_fmr_pool *pool;
+ int i;
+ int ret;
+
+ if (!params)
+ return ERR_PTR(-EINVAL);
+
+ device = pd->device;
+ if (!device->alloc_fmr || !device->dealloc_fmr ||
+ !device->map_phys_fmr || !device->unmap_fmr) {
+ printk(KERN_WARNING "Device %s does not support fast memory regions",
+ device->name);
+ return ERR_PTR(-ENOSYS);
+ }
+
+ pool = kmalloc(sizeof *pool, GFP_KERNEL);
+ if (!pool) {
+ printk(KERN_WARNING "couldn't allocate pool struct");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pool->cache_bucket = NULL;
+
+ pool->flush_function = params->flush_function;
+ pool->flush_arg = params->flush_arg;
+
+ INIT_LIST_HEAD(&pool->free_list);
+ INIT_LIST_HEAD(&pool->dirty_list);
+
+ if (params->cache) {
+ pool->cache_bucket =
+ kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
+ GFP_KERNEL);
+ if (!pool->cache_bucket) {
+ printk(KERN_WARNING "Failed to allocate cache in pool");
+ ret = -ENOMEM;
+ goto out_free_pool;
+ }
+
+ for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
+ INIT_HLIST_HEAD(pool->cache_bucket + i);
+ }
+
+ pool->pool_size = 0;
+ pool->max_pages = params->max_pages_per_fmr;
+ pool->dirty_watermark = params->dirty_watermark;
+ pool->dirty_len = 0;
+ spin_lock_init(&pool->pool_lock);
+ atomic_set(&pool->req_ser, 0);
+ atomic_set(&pool->flush_ser, 0);
+ init_waitqueue_head(&pool->force_wait);
+
+ pool->thread = kthread_create(ib_fmr_cleanup_thread,
+ pool,
+ "ib_fmr(%s)",
+ device->name);
+ if (IS_ERR(pool->thread)) {
+ printk(KERN_WARNING "couldn't start cleanup thread");
+ ret = PTR_ERR(pool->thread);
+ goto out_free_pool;
+ }
+
+ {
+ struct ib_pool_fmr *fmr;
+ struct ib_fmr_attr attr = {
+ .max_pages = params->max_pages_per_fmr,
+ .max_maps = IB_FMR_MAX_REMAPS,
+ .page_shift = params->page_shift
+ };
+
+ for (i = 0; i < params->pool_size; ++i) {
+ fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64),
+ GFP_KERNEL);
+ if (!fmr) {
+ printk(KERN_WARNING "failed to allocate fmr struct "
+ "for FMR %d", i);
+ goto out_fail;
+ }
+
+ fmr->pool = pool;
+ fmr->remap_count = 0;
+ fmr->ref_count = 0;
+ INIT_HLIST_NODE(&fmr->cache_node);
+
+ fmr->fmr = ib_alloc_fmr(pd, params->access, &attr);
+ if (IS_ERR(fmr->fmr)) {
+ printk(KERN_WARNING "fmr_create failed for FMR %d", i);
+ kfree(fmr);
+ goto out_fail;
+ }
+
+ list_add_tail(&fmr->list, &pool->free_list);
+ ++pool->pool_size;
+ }
+ }
+
+ return pool;
+
+ out_free_pool:
+ kfree(pool->cache_bucket);
+ kfree(pool);
+
+ return ERR_PTR(ret);
+
+ out_fail:
+ ib_destroy_fmr_pool(pool);
+
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(ib_create_fmr_pool);
+
+/**
+ * ib_destroy_fmr_pool - Free FMR pool
+ * @pool:FMR pool to free
+ *
+ * Destroy an FMR pool and free all associated resources.
+ */
+void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
+{
+ struct ib_pool_fmr *fmr;
+ struct ib_pool_fmr *tmp;
+ LIST_HEAD(fmr_list);
+ int i;
+
+ kthread_stop(pool->thread);
+ ib_fmr_batch_release(pool);
+
+ i = 0;
+ list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
+ if (fmr->remap_count) {
+ INIT_LIST_HEAD(&fmr_list);
+ list_add_tail(&fmr->fmr->list, &fmr_list);
+ ib_unmap_fmr(&fmr_list);
+ }
+ ib_dealloc_fmr(fmr->fmr);
+ list_del(&fmr->list);
+ kfree(fmr);
+ ++i;
+ }
+
+ if (i < pool->pool_size)
+ printk(KERN_WARNING "pool still has %d regions registered",
+ pool->pool_size - i);
+
+ kfree(pool->cache_bucket);
+ kfree(pool);
+}
+EXPORT_SYMBOL(ib_destroy_fmr_pool);
+
+/**
+ * ib_flush_fmr_pool - Invalidate all unmapped FMRs
+ * @pool:FMR pool to flush
+ *
+ * Ensure that all unmapped FMRs are fully invalidated.
+ */
+int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
+{
+ int serial;
+
+ atomic_inc(&pool->req_ser);
+ /*
+ * It's OK if someone else bumps req_ser again here -- we'll
+ * just wait a little longer.
+ */
+ serial = atomic_read(&pool->req_ser);
+
+ wake_up_process(pool->thread);
+
+ if (wait_event_interruptible(pool->force_wait,
+ atomic_read(&pool->flush_ser) - serial >= 0))
+ return -EINTR;
+
+ return 0;
+}
+EXPORT_SYMBOL(ib_flush_fmr_pool);
+
+/**
+ * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
+ * @pool:FMR pool to allocate FMR from
+ * @page_list:List of pages to map
+ * @list_len:Number of pages in @page_list
+ * @io_virtual_address:I/O virtual address for new FMR
+ *
+ * Map an FMR from an FMR pool.
+ */
+struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
+ u64 *page_list,
+ int list_len,
+ u64 *io_virtual_address)
+{
+ struct ib_fmr_pool *pool = pool_handle;
+ struct ib_pool_fmr *fmr;
+ unsigned long flags;
+ int result;
+
+ if (list_len < 1 || list_len > pool->max_pages)
+ return ERR_PTR(-EINVAL);
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+ fmr = ib_fmr_cache_lookup(pool,
+ page_list,
+ list_len,
+ *io_virtual_address);
+ if (fmr) {
+ /* found in cache */
+ ++fmr->ref_count;
+ if (fmr->ref_count == 1) {
+ list_del(&fmr->list);
+ }
+
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+
+ return fmr;
+ }
+
+ if (list_empty(&pool->free_list)) {
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
+ list_del(&fmr->list);
+ hlist_del_init(&fmr->cache_node);
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+
+ result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
+ *io_virtual_address);
+
+ if (result) {
+ spin_lock_irqsave(&pool->pool_lock, flags);
+ list_add(&fmr->list, &pool->free_list);
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+
+ printk(KERN_WARNING "fmr_map returns %d\n",
+ result);
+
+ return ERR_PTR(result);
+ }
+
+ ++fmr->remap_count;
+ fmr->ref_count = 1;
+
+ if (pool->cache_bucket) {
+ fmr->io_virtual_address = *io_virtual_address;
+ fmr->page_list_len = list_len;
+ memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+ hlist_add_head(&fmr->cache_node,
+ pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+ }
+
+ return fmr;
+}
+EXPORT_SYMBOL(ib_fmr_pool_map_phys);
+
+/**
+ * ib_fmr_pool_unmap - Unmap FMR
+ * @fmr:FMR to unmap
+ *
+ * Unmap an FMR. The FMR mapping may remain valid until the FMR is
+ * reused (or until ib_flush_fmr_pool() is called).
+ */
+int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
+{
+ struct ib_fmr_pool *pool;
+ unsigned long flags;
+
+ pool = fmr->pool;
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+
+ --fmr->ref_count;
+ if (!fmr->ref_count) {
+ if (fmr->remap_count < IB_FMR_MAX_REMAPS) {
+ list_add_tail(&fmr->list, &pool->free_list);
+ } else {
+ list_add_tail(&fmr->list, &pool->dirty_list);
+ ++pool->dirty_len;
+ wake_up_process(pool->thread);
+ }
+ }
+
+#ifdef DEBUG
+ if (fmr->ref_count < 0)
+ printk(KERN_WARNING "FMR %p has ref count %d < 0",
+ fmr, fmr->ref_count);
+#endif
+
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(ib_fmr_pool_unmap);
--- /dev/null
+/*
+ * Copyright (c) 2004 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: ib_fmr_pool.h 5273 2006-02-02 18:46:00Z roland $
+ */
+
+#if !defined(IB_FMR_POOL_H)
+#define IB_FMR_POOL_H
+
+#include <rdma/ib_verbs.h>
+
+struct ib_fmr_pool;
+
+/**
+ * struct ib_fmr_pool_param - Parameters for creating FMR pool
+ * @max_pages_per_fmr:Maximum number of pages per map request.
+ * @page_shift:Log2 of the size of the "pages" mapped by this FMR.
+ * @access:Access flags for FMRs in pool.
+ * @pool_size:Number of FMRs to allocate for pool.
+ * @dirty_watermark:Flush is triggered when @dirty_watermark dirty
+ * FMRs are present.
+ * @flush_function:Callback called when unmapped FMRs are flushed and
+ * more FMRs are possibly available for mapping
+ * @flush_arg:Context passed to user's flush function.
+ * @cache:If set, FMRs may be reused after unmapping for identical map
+ * requests.
+ */
+struct ib_fmr_pool_param {
+ int max_pages_per_fmr;
+ int page_shift;
+ enum ib_access_flags access;
+ int pool_size;
+ int dirty_watermark;
+ void (*flush_function)(struct ib_fmr_pool *pool,
+ void *arg);
+ void *flush_arg;
+ unsigned cache:1;
+};
+
+struct ib_pool_fmr {
+ struct ib_fmr *fmr;
+ struct ib_fmr_pool *pool;
+ struct list_head list;
+ struct hlist_node cache_node;
+ int ref_count;
+ int remap_count;
+ u64 io_virtual_address;
+ int page_list_len;
+ u64 page_list[0];
+};
+
+struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
+ struct ib_fmr_pool_param *params);
+
+void ib_destroy_fmr_pool(struct ib_fmr_pool *pool);
+
+int ib_flush_fmr_pool(struct ib_fmr_pool *pool);
+
+struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
+ u64 *page_list,
+ int list_len,
+ u64 *io_virtual_address);
+
+int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr);
+
+#endif /* IB_FMR_POOL_H */
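
For reference, a minimal consumer of the API declared above might look as
follows. This is an illustrative sketch, not part of the patch: the PD,
the DMA-mapped page addresses, and the access-flag value (my_pd, dma0,
dma1, IB_ACCESS_LOCAL_WRITE) are assumptions for the example, and the
usual <linux/err.h> helpers are assumed to be available.

static int fmr_pool_example(struct ib_pd *my_pd, u64 dma0, u64 dma1)
{
	struct ib_fmr_pool_param param = {
		.max_pages_per_fmr = 64,
		.page_shift        = 12,	/* 4 KB "pages" */
		.access            = IB_ACCESS_LOCAL_WRITE,
		.pool_size         = 32,
		.dirty_watermark   = 8,
		.cache             = 1,
	};
	struct ib_fmr_pool *pool;
	struct ib_pool_fmr *fmr;
	u64 pages[2];
	u64 io_addr;
	int ret = 0;

	pool = ib_create_fmr_pool(my_pd, &param);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	pages[0] = dma0;	/* must be page aligned */
	pages[1] = dma1;
	io_addr  = dma0;

	fmr = ib_fmr_pool_map_phys(pool, pages, 2, &io_addr);
	if (IS_ERR(fmr)) {
		ret = PTR_ERR(fmr);	/* e.g. -EAGAIN if the free list is empty */
	} else {
		/* ... post work requests using fmr->fmr ... */
		ib_fmr_pool_unmap(fmr);
	}

	ib_flush_fmr_pool(pool);	/* force unmapped FMRs to be invalidated */
	ib_destroy_fmr_pool(pool);
	return ret;
}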
// MACROS
// ===========================================
-// nullifying macros
-#define might_sleep()
-
// ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+++ /dev/null
-/*
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
- * Copyright (c) 2005 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: uverbs.h 4227 2005-11-30 00:58:50Z roland $
- */
-
-#ifndef UVERBS_H
-#define UVERBS_H
-
-#include <ib_verbs.h>
-#include <ib_user_verbs.h>
-
-/*
- * Our lifetime rules for these structs are the following:
- *
- * struct ib_uverbs_device: One reference is held by the module and
- * released in ib_uverbs_remove_one(). Another reference is taken by
- * ib_uverbs_open() each time the character special file is opened,
- * and released in ib_uverbs_release_file() when the file is released.
- *
- * struct ib_uverbs_file: One reference is held by the VFS and
- * released when the file is closed. Another reference is taken when
- * an asynchronous event queue file is created and released when the
- * event file is closed.
- *
- * struct ib_uverbs_event_file: One reference is held by the VFS and
- * released when the file is closed. For asynchronous event files,
- * another reference is held by the corresponding main context file
- * and released when that file is closed. For completion event files,
- * a reference is taken when a CQ is created that uses the file, and
- * released when the CQ is destroyed.
- */
-
-struct ib_uverbs_device {
- struct ib_device *ib_dev;
-};
-
-struct ib_uverbs_mcast_entry {
- struct list_head list;
- union ib_gid gid;
- u16 lid;
-};
-
-struct ib_uevent_object {
- struct ib_uobject uobject;
- struct list_head event_list;
- u32 events_reported;
-};
-
-struct ib_uqp_object {
- struct ib_uevent_object uevent;
- struct list_head mcast_list;
-};
-
-struct ib_ucq_object {
- struct ib_uobject uobject;
- struct ib_uverbs_file *uverbs_file;
- struct list_head comp_list;
- struct list_head async_list;
- u32 comp_events_reported;
- u32 async_events_reported;
-};
-
-#ifdef LIN_TO_BE_CHANGED
-extern struct semaphore ib_uverbs_idr_mutex;
-extern struct idr ib_uverbs_pd_idr;
-extern struct idr ib_uverbs_mr_idr;
-extern struct idr ib_uverbs_mw_idr;
-extern struct idr ib_uverbs_ah_idr;
-extern struct idr ib_uverbs_cq_idr;
-extern struct idr ib_uverbs_qp_idr;
-extern struct idr ib_uverbs_srq_idr;
-#endif
-
-struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
- int is_async, int *fd);
-void ib_uverbs_release_event_file(struct kref *ref);
-struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd);
-
-void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
- struct ib_uverbs_event_file *ev_file,
- struct ib_ucq_object *uobj);
-void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
- struct ib_uevent_object *uobj);
-
-void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
-void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
-void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
-void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
-void ib_uverbs_event_handler(struct ib_event_handler *handler,
- struct ib_event *event);
-
-int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
- void *addr, size_t size, int write);
-void ib_umem_release(struct ib_device *dev, struct ib_umem *umem);
-void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem);
-
-#ifdef LIN_TO_BE_CHANGED
-#define IB_UVERBS_DECLARE_CMD(name) \
- ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \
- const char *buf, int in_len, \
- int out_len)
-#endif
-
-IB_UVERBS_DECLARE_CMD(get_context);
-IB_UVERBS_DECLARE_CMD(query_device);
-IB_UVERBS_DECLARE_CMD(query_port);
-IB_UVERBS_DECLARE_CMD(alloc_pd);
-IB_UVERBS_DECLARE_CMD(dealloc_pd);
-IB_UVERBS_DECLARE_CMD(reg_mr);
-IB_UVERBS_DECLARE_CMD(dereg_mr);
-IB_UVERBS_DECLARE_CMD(create_comp_channel);
-IB_UVERBS_DECLARE_CMD(create_cq);
-IB_UVERBS_DECLARE_CMD(poll_cq);
-IB_UVERBS_DECLARE_CMD(req_notify_cq);
-IB_UVERBS_DECLARE_CMD(destroy_cq);
-IB_UVERBS_DECLARE_CMD(create_qp);
-IB_UVERBS_DECLARE_CMD(modify_qp);
-IB_UVERBS_DECLARE_CMD(destroy_qp);
-IB_UVERBS_DECLARE_CMD(post_send);
-IB_UVERBS_DECLARE_CMD(post_recv);
-IB_UVERBS_DECLARE_CMD(post_srq_recv);
-IB_UVERBS_DECLARE_CMD(create_ah);
-IB_UVERBS_DECLARE_CMD(destroy_ah);
-IB_UVERBS_DECLARE_CMD(attach_mcast);
-IB_UVERBS_DECLARE_CMD(detach_mcast);
-IB_UVERBS_DECLARE_CMD(create_srq);
-IB_UVERBS_DECLARE_CMD(modify_srq);
-IB_UVERBS_DECLARE_CMD(destroy_srq);
-
-struct ib_pd *ib_uverbs_alloc_pd(struct ib_device *device,
- struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
-
-int ib_uverbs_dealloc_pd(struct ib_pd *pd);
-
-#endif /* UVERBS_H */
{
int err = 0;
- if (down_interruptible(&dev->cmd.hcr_mutex))
- return -EINTR;
+ down(&dev->cmd.hcr_mutex);
if (event && wait_go_bit(dev,GO_BIT_TIMEOUT)) {
err = -EAGAIN;
{
int err = 0;
- if (sem_down_interruptible(&dev->cmd.poll_sem))
- return -EINTR;
+ sem_down(&dev->cmd.poll_sem);
err = mthca_cmd_post(dev, in_param,
out_param ? *out_param : 0,
struct mthca_cmd_context *context;
SPIN_LOCK_PREP(lh);
- if (sem_down_interruptible(&dev->cmd.event_sem))
- return -EINTR;
+ sem_down(&dev->cmd.event_sem);
spin_lock( &dev->cmd.context_lock, &lh );
BUG_ON(dev->cmd.free_head < 0);
MTHCA_GET(size, outbox, QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET);
dev_lim->uar_scratch_entry_sz = size;
- HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max QPs: %d, reserved QPs: %d, entry size: %d\n",
- dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz));
- HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
- dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz));
- HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max CQs: %d, reserved CQs: %d, entry size: %d\n",
- dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz));
- HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max EQs: %d, reserved EQs: %d, entry size: %d\n",
- dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz));
- HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("reserved MPTs: %d, reserved MTTs: %d\n",
- dev_lim->reserved_mrws, dev_lim->reserved_mtts));
- HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
- dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars));
- HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max QP/MCG: %d, reserved MGMs: %d\n",
- dev_lim->max_pds, dev_lim->reserved_mgms));
- HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
- dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz));
-
- HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("Flags: %08x\n", dev_lim->flags));
-
if (mthca_is_memfree(dev)) {
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
dev_lim->max_srq_sz = 1 << field;
dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE;
}
+ HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max QPs: %d, reserved QPs: %d, entry size: %d\n",
+ dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz));
+ HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
+ dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz));
+ HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max CQs: %d, reserved CQs: %d, entry size: %d\n",
+ dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz));
+ HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max EQs: %d, reserved EQs: %d, entry size: %d\n",
+ dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz));
+ HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("reserved MPTs: %d, reserved MTTs: %d\n",
+ dev_lim->reserved_mrws, dev_lim->reserved_mtts));
+ HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
+ dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars));
+ HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max QP/MCG: %d, reserved MGMs: %d\n",
+ dev_lim->max_mcgs, dev_lim->reserved_mgms));
+ HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
+ dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz));
+
+ HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("Flags: %08x\n", dev_lim->flags));
+
out:
mthca_free_mailbox(dev, mailbox);
return err;
}
}
} else
- err = mthca_cmd(dev, mailbox->dma, (!!is_ee << 24) | num,
+ err = mthca_cmd(dev, mailbox->dma, optmask | (!!is_ee << 24) | num,
op_mod, op[trans], CMD_TIME_CLASS_C, status);
if (my_mailbox)
wake_up(&cq->wait);
}
-static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
+static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
struct mthca_qp *qp, int wqe_index, int is_send,
struct mthca_err_cqe *cqe,
struct _ib_wc *entry, int *free_cqe)
{
- int err;
int dbd;
__be32 new_wqe;
* error case, so we don't have to check the doorbell count, etc.
*/
if (mthca_is_memfree(dev))
- return 0;
+ return;
- err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
- if (err)
- return err;
+ mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
/*
* If we're at the end of the WQE chain, or we've used up our
* the next poll operation.
*/
if (!(new_wqe & cl_hton32(0x3f)) || (!cqe->db_cnt && dbd))
- return 0;
+ return;
cqe->db_cnt = cl_hton16(cl_ntoh16((u16)(cqe->db_cnt - dbd)));
cqe->wqe = new_wqe;
cqe->syndrome = SYNDROME_WR_FLUSH_ERR;
*free_cqe = 0;
-
- return 0;
}
static inline int mthca_poll_one(struct mthca_dev *dev,
}
if (is_error) {
- err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
- (struct mthca_err_cqe *) cqe,
- entry, &free_cqe);
+ handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
+ (struct mthca_err_cqe *) cqe, entry, &free_cqe);
}
else
entry->status = IB_WCS_SUCCESS;
int i;
SPIN_LOCK_PREP(lh);
- might_sleep();
-
cq->ibcq.cqe = nent - 1;
cq->is_kernel = !ctx;
u8 status;
SPIN_LOCK_PREP(lh);
- might_sleep();
-
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox)) {
HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("No memory for mailbox to free CQ.\n"));
struct _ib_send_wr **bad_wr);
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct _ib_recv_wr *wr,
struct _ib_recv_wr **bad_wr);
-int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
+void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
int index, int *dbd, __be32 *new_wqe);
int mthca_alloc_qp(struct mthca_dev *dev,
struct mthca_pd *pd,
HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT,("MAP_EQ for cmd EQ %d returned status 0x%02x\n",
dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status));
- for (i = 0; i < MTHCA_EQ_CMD; ++i)
+ for (i = 0; i < MTHCA_NUM_EQ; ++i)
if (mthca_is_memfree(dev))
arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
else
return -ENODEV;
}
+ if (dev_lim->uar_size > pci_resource_len(mdev, 2)) {
+ HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW , ("HCA reported UAR size of 0x%x bigger than "
+ "PCI resource 2 size of 0x%lx, aborting.\n",
+ dev_lim->uar_size, pci_resource_len(mdev, 2)));
+ return -ENODEV;
+ }
+
mdev->limits.num_ports = dev_lim->num_ports;
mdev->limits.vl_cap = dev_lim->max_vl;
mdev->limits.mtu_cap = dev_lim->max_mtu;
return PTR_ERR(mailbox);
mgm = mailbox->buf;
- if (down_interruptible(&dev->mcg_table.mutex)) {
- err = -EINTR;
- goto err_sem;
- }
+ down(&dev->mcg_table.mutex);
err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
if (err)
mthca_free(&dev->mcg_table.alloc, index);
}
KeReleaseMutex(&dev->mcg_table.mutex,FALSE);
-err_sem:
mthca_free_mailbox(dev, mailbox);
return err;
}
return PTR_ERR(mailbox);
mgm = mailbox->buf;
- if (down_interruptible(&dev->mcg_table.mutex)) {
- err = -EINTR;
- goto err_sem;
- }
+ down(&dev->mcg_table.mutex);
err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
if (err)
out:
KeReleaseMutex(&dev->mcg_table.mutex, FALSE);
-err_sem:
mthca_free_mailbox(dev, mailbox);
return err;
}
u8 status;
CPU_2_BE64_PREP;
- might_sleep();
-
WARN_ON(buffer_size_shift >= 32);
key = mthca_alloc(&dev->mr_table.mpt_alloc);
int err;
u8 status;
- might_sleep();
-
err = mthca_HW2SW_MPT(dev, NULL,
key_to_hw_index(dev, mr->ibmr.lkey) &
(dev->limits.num_mpts - 1),
int i;
CPU_2_BE64_PREP;
- might_sleep();
-
- if (mr->attr.page_size < 12 || mr->attr.page_size >= 32)
+ if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)
return -EINVAL;
/* For Arbel, all MTTs must fit in the same page. */
MTHCA_MPT_FLAG_REGION |
access);
- mpt_entry->page_size = cl_hton32(mr->attr.page_size - 12);
+ mpt_entry->page_size = cl_hton32(mr->attr.page_shift - 12);
mpt_entry->key = cl_hton32(key);
mpt_entry->pd = cl_hton32(pd);
RtlZeroMemory(&mpt_entry->start,
if (list_len > fmr->attr.max_pages)
return -EINVAL;
- page_mask = (1 << fmr->attr.page_size) - 1;
+ page_mask = (1 << fmr->attr.page_shift) - 1;
/* We are getting page lists, so va must be page aligned. */
if (iova & page_mask)
}
mpt_entry.lkey = cl_hton32(key);
- mpt_entry.length = CPU_2_BE64(list_len * (1ull << fmr->attr.page_size));
+ mpt_entry.length = CPU_2_BE64(list_len * (1ull << fmr->attr.page_shift));
mpt_entry.start = cl_hton64(iova);
__raw_writel((u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
fmr->mem.arbel.mpt->key = cl_hton32(key);
fmr->mem.arbel.mpt->lkey = cl_hton32(key);
- fmr->mem.arbel.mpt->length = CPU_2_BE64(list_len * (1ull << fmr->attr.page_size));
+ fmr->mem.arbel.mpt->length = CPU_2_BE64(list_len * (1ull << fmr->attr.page_shift));
fmr->mem.arbel.mpt->start = cl_hton64(iova);
wmb();
{
int err = 0;
- might_sleep();
-
pd->privileged = privileged;
atomic_set(&pd->sqp_count, 0);
void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd)
{
- might_sleep();
if (pd->privileged)
mthca_free_mr(dev, &pd->ntmr);
mthca_free(&dev->pd_table.alloc, pd->pd_num);
t[IBQPS_RTS].opt_param[MLX] = IB_QP_CUR_STATE |IB_QP_QKEY;
t[IBQPS_SQD].trans = MTHCA_TRANS_RTS2SQD;
+ t[IBQPS_SQD].opt_param[UD] = IB_QP_EN_SQD_ASYNC_NOTIFY;
+ t[IBQPS_SQD].opt_param[UC] = IB_QP_EN_SQD_ASYNC_NOTIFY;
+ t[IBQPS_SQD].opt_param[RC] = IB_QP_EN_SQD_ASYNC_NOTIFY;
+ t[IBQPS_SQD].opt_param[MLX] = IB_QP_EN_SQD_ASYNC_NOTIFY;
/* IBQPS_SQD */
t = &state_table[IBQPS_SQD][0];
struct mthca_qp_param *qp_param;
struct mthca_qp_context *qp_context;
u32 req_param, opt_param;
+ u32 sqd_event = 0;
u8 status;
int err;
SPIN_LOCK_PREP(lhs);
qp_context->srqn = cl_hton32(1 << 24 |
to_msrq(ibqp->srq)->srqn);
+ if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
+ attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY &&
+ attr->en_sqd_async_notify)
+ sqd_event = 1U << 31;
+
err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
- qp->qpn, 0, mailbox, 0, &status);
+ qp->qpn, 0, mailbox, sqd_event, &status);
if (status) {
HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("modify QP %d returned status %02x.\n",
state_table[cur_state][new_state].trans, status));
qp->send_wqe_offset) |opcode);
wmb();
((struct mthca_next_seg *) prev_wqe)->ee_nds =
- cl_hton32((size0 ? 0 : MTHCA_NEXT_DBD) |size);
+ cl_hton32((size0 ? 0 : MTHCA_NEXT_DBD) | size |
+ ((wr->send_opt & IB_SEND_OPT_FENCE) ?
+ MTHCA_NEXT_FENCE : 0));
if (!size0) {
size0 = size;
qp->send_wqe_offset) |opcode);
wmb();
((struct mthca_next_seg *) prev_wqe)->ee_nds =
- cl_hton32(MTHCA_NEXT_DBD | size);
+ cl_hton32(MTHCA_NEXT_DBD | size |
+ ((wr->send_opt & IB_SEND_OPT_FENCE) ?
+ MTHCA_NEXT_FENCE : 0));
if (!size0) {
size0 = size;
return err;
}
-int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
+void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
int index, int *dbd, __be32 *new_wqe)
{
struct mthca_next_seg *next;
*/
if (qp->ibqp.srq) {
*new_wqe = 0;
- return 0;
+ return;
}
if (is_send)
(next->ee_nds & cl_hton32(0x3f));
else
*new_wqe = 0;
-
- return 0;
}
int mthca_init_qp_table(struct mthca_dev *dev)
srq->first_free = 0;
srq->last_free = srq->max - 1;
+ attr->max_wr = srq->max;
+ attr->max_sge = srq->max_gs;
+
return 0;
err_out_free_srq:
typedef struct ibv_device *(*ibv_driver_init_func)(struct sysfs_class_device *);
-extern int ibv_cmd_get_context(struct ibv_context *context, struct ibv_get_context *cmd,
+int ibv_cmd_get_context(struct ibv_context *context, struct ibv_get_context *cmd,
size_t cmd_size, struct ibv_get_context_resp *resp,
size_t resp_size);
-extern int ibv_cmd_query_device(struct ibv_context *context,
+int ibv_cmd_query_device(struct ibv_context *context,
struct ibv_device_attr *device_attr,
uint64_t *raw_fw_ver,
struct ibv_query_device *cmd, size_t cmd_size);
-extern int ibv_cmd_query_port(struct ibv_context *context, uint8_t port_num,
+int ibv_cmd_query_port(struct ibv_context *context, uint8_t port_num,
struct ibv_port_attr *port_attr,
struct ibv_query_port *cmd, size_t cmd_size);
-extern int ibv_cmd_query_gid(struct ibv_context *context, uint8_t port_num,
+int ibv_cmd_query_gid(struct ibv_context *context, uint8_t port_num,
int index, union ibv_gid *gid);
-extern int ibv_cmd_query_pkey(struct ibv_context *context, uint8_t port_num,
+int ibv_cmd_query_pkey(struct ibv_context *context, uint8_t port_num,
int index, uint16_t *pkey);
-extern int ibv_cmd_alloc_pd(struct ibv_context *context, struct ibv_pd *pd,
+int ibv_cmd_alloc_pd(struct ibv_context *context, struct ibv_pd *pd,
struct ibv_alloc_pd *cmd, size_t cmd_size,
struct ibv_alloc_pd_resp *resp, size_t resp_size);
-extern int ibv_cmd_dealloc_pd(struct ibv_pd *pd);
-extern int ibv_cmd_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
+int ibv_cmd_dealloc_pd(struct ibv_pd *pd);
+int ibv_cmd_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
uint64_t hca_va, enum ibv_access_flags access,
struct ibv_mr *mr, struct ibv_reg_mr *cmd,
size_t cmd_size);
-extern int ibv_cmd_dereg_mr(struct ibv_mr *mr);
-extern int ibv_cmd_create_cq(struct ibv_context *context, int cqe,
+int ibv_cmd_dereg_mr(struct ibv_mr *mr);
+int ibv_cmd_create_cq(struct ibv_context *context, int cqe,
struct ibv_comp_channel *channel,
int comp_vector, struct ibv_cq *cq,
struct ibv_create_cq *cmd, size_t cmd_size,
struct ibv_create_cq_resp *resp, size_t resp_size);
-extern int ibv_cmd_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
-extern int ibv_cmd_req_notify_cq(struct ibv_cq *cq, int solicited_only);
-extern int ibv_cmd_destroy_cq(struct ibv_cq *cq);
+int ibv_cmd_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
+int ibv_cmd_req_notify_cq(struct ibv_cq *cq, int solicited_only);
+int ibv_cmd_destroy_cq(struct ibv_cq *cq);
-extern int ibv_cmd_create_srq(struct ibv_pd *pd,
+int ibv_cmd_create_srq(struct ibv_pd *pd,
struct ibv_srq *srq, struct ibv_srq_init_attr *attr,
struct ibv_create_srq *cmd, size_t cmd_size,
struct ibv_create_srq_resp *resp, size_t resp_size);
-extern int ibv_cmd_modify_srq(struct ibv_srq *srq,
+int ibv_cmd_modify_srq(struct ibv_srq *srq,
struct ibv_srq_attr *srq_attr,
enum ibv_srq_attr_mask srq_attr_mask,
struct ibv_modify_srq *cmd, size_t cmd_size);
-extern int ibv_cmd_destroy_srq(struct ibv_srq *srq);
+int ibv_cmd_destroy_srq(struct ibv_srq *srq);
-extern int ibv_cmd_create_qp(struct ibv_pd *pd,
+int ibv_cmd_create_qp(struct ibv_pd *pd,
struct ibv_qp *qp, struct ibv_qp_init_attr *attr,
struct ibv_create_qp *cmd, size_t cmd_size);
-extern int ibv_cmd_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+int ibv_cmd_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
enum ibv_qp_attr_mask attr_mask,
struct ibv_modify_qp *cmd, size_t cmd_size);
-extern int ibv_cmd_destroy_qp(struct ibv_qp *qp);
-extern int ibv_cmd_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
+int ibv_cmd_destroy_qp(struct ibv_qp *qp);
+int ibv_cmd_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
struct ibv_send_wr **bad_wr);
-extern int ibv_cmd_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
+int ibv_cmd_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
struct ibv_recv_wr **bad_wr);
-extern int ibv_cmd_post_srq_recv(struct ibv_srq *srq, struct ibv_recv_wr *wr,
+int ibv_cmd_post_srq_recv(struct ibv_srq *srq, struct ibv_recv_wr *wr,
struct ibv_recv_wr **bad_wr);
-extern int ibv_cmd_create_ah(struct ibv_pd *pd, struct ibv_ah *ah,
+int ibv_cmd_create_ah(struct ibv_pd *pd, struct ibv_ah *ah,
struct ibv_ah_attr *attr);
-extern int ibv_cmd_destroy_ah(struct ibv_ah *ah);
-extern int ibv_cmd_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);
-extern int ibv_cmd_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);
+int ibv_cmd_destroy_ah(struct ibv_ah *ah);
+int ibv_cmd_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);
+int ibv_cmd_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);
#endif /* INFINIBAND_DRIVER_H */
return to_mctx(ibctx)->hca_type == MTHCA_ARBEL;
}
-extern int mthca_alloc_db(struct mthca_db_table *db_tab, enum mthca_db_type type,
+int mthca_alloc_db(struct mthca_db_table *db_tab, enum mthca_db_type type,
uint32_t **db);
-extern void mthca_set_db_qn(uint32_t *db, enum mthca_db_type type, uint32_t qn);
-extern void mthca_free_db(struct mthca_db_table *db_tab, enum mthca_db_type type, int db_index);
-extern struct mthca_db_table *mthca_alloc_db_tab(int uarc_size);
-extern void mthca_free_db_tab(struct mthca_db_table *db_tab);
+void mthca_set_db_qn(uint32_t *db, enum mthca_db_type type, uint32_t qn);
+void mthca_free_db(struct mthca_db_table *db_tab, enum mthca_db_type type, int db_index);
+struct mthca_db_table *mthca_alloc_db_tab(int uarc_size);
+void mthca_free_db_tab(struct mthca_db_table *db_tab);
-extern int mthca_query_device(struct ibv_context *context,
+int mthca_query_device(struct ibv_context *context,
struct ibv_device_attr *attr);
-extern int mthca_query_port(struct ibv_context *context, uint8_t port,
+int mthca_query_port(struct ibv_context *context, uint8_t port,
struct ibv_port_attr *attr);
-extern struct ibv_pd *mthca_alloc_pd(struct ibv_context *context,
+struct ibv_pd *mthca_alloc_pd(struct ibv_context *context,
struct ibv_alloc_pd_resp *resp_p);
-extern int mthca_free_pd(struct ibv_pd *pd);
+int mthca_free_pd(struct ibv_pd *pd);
-extern struct ibv_cq *mthca_create_cq_pre(struct ibv_context *context, int *cqe,
+struct ibv_cq *mthca_create_cq_pre(struct ibv_context *context, int *cqe,
struct ibv_create_cq *req);
-extern struct ibv_cq *mthca_create_cq_post(struct ibv_context *context,
+struct ibv_cq *mthca_create_cq_post(struct ibv_context *context,
struct ibv_create_cq_resp *resp);
-extern int mthca_destroy_cq(struct ibv_cq *cq);
-extern int mthca_poll_cq(struct ibv_cq *cq, int ne, struct _ib_wc *wc);
-extern int mthca_poll_cq_list(struct ibv_cq *ibcq,
+int mthca_destroy_cq(struct ibv_cq *cq);
+int mthca_poll_cq(struct ibv_cq *cq, int ne, struct _ib_wc *wc);
+int mthca_poll_cq_list(struct ibv_cq *ibcq,
struct _ib_wc** const pp_free_wclist,
struct _ib_wc** const pp_done_wclist );
-extern int mthca_tavor_arm_cq(struct ibv_cq *cq, int solicited);
-extern int mthca_arbel_arm_cq(struct ibv_cq *cq, int solicited);
-extern void mthca_arbel_cq_event(struct ibv_cq *cq);
-extern void mthca_cq_clean(struct mthca_cq *cq, uint32_t qpn,
+int mthca_tavor_arm_cq(struct ibv_cq *cq, int solicited);
+int mthca_arbel_arm_cq(struct ibv_cq *cq, int solicited);
+void mthca_arbel_cq_event(struct ibv_cq *cq);
+void mthca_cq_clean(struct mthca_cq *cq, uint32_t qpn,
struct mthca_srq *srq);
-extern void mthca_init_cq_buf(struct mthca_cq *cq, int nent);
+void mthca_init_cq_buf(struct mthca_cq *cq, int nent);
-extern struct ibv_srq *mthca_create_srq(struct ibv_pd *pd,
+struct ibv_srq *mthca_create_srq(struct ibv_pd *pd,
struct ibv_srq_init_attr *attr);
-extern int mthca_modify_srq(struct ibv_srq *srq,
+int mthca_modify_srq(struct ibv_srq *srq,
struct ibv_srq_attr *attr,
enum ibv_srq_attr_mask mask);
-extern int mthca_destroy_srq(struct ibv_srq *srq);
-extern int mthca_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,
+int mthca_destroy_srq(struct ibv_srq *srq);
+int mthca_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,
struct mthca_srq *srq);
-extern void mthca_free_srq_wqe(struct mthca_srq *srq, int ind);
-extern int mthca_tavor_post_srq_recv(struct ibv_srq *ibsrq,
+void mthca_free_srq_wqe(struct mthca_srq *srq, int ind);
+int mthca_tavor_post_srq_recv(struct ibv_srq *ibsrq,
struct _ib_recv_wr *wr,
struct _ib_recv_wr **bad_wr);
-extern int mthca_arbel_post_srq_recv(struct ibv_srq *ibsrq,
+int mthca_arbel_post_srq_recv(struct ibv_srq *ibsrq,
struct _ib_recv_wr *wr,
struct _ib_recv_wr **bad_wr);
-extern struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd,
+struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd,
struct ibv_qp_init_attr *attr, struct ibv_create_qp *req);
-extern struct ibv_qp *mthca_create_qp_post(struct ibv_pd *pd,
+struct ibv_qp *mthca_create_qp_post(struct ibv_pd *pd,
struct ibv_create_qp_resp *resp);
-extern int mthca_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+int mthca_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
enum ibv_qp_attr_mask attr_mask);
-extern int mthca_destroy_qp(struct ibv_qp *qp);
-extern void mthca_init_qp_indices(struct mthca_qp *qp);
-extern int mthca_tavor_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr,
+int mthca_destroy_qp(struct ibv_qp *qp);
+void mthca_init_qp_indices(struct mthca_qp *qp);
+int mthca_tavor_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr,
struct _ib_send_wr **bad_wr);
-extern int mthca_tavor_post_recv(struct ibv_qp *ibqp, struct _ib_recv_wr *wr,
+int mthca_tavor_post_recv(struct ibv_qp *ibqp, struct _ib_recv_wr *wr,
struct _ib_recv_wr **bad_wr);
-extern int mthca_arbel_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr,
+int mthca_arbel_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr,
struct _ib_send_wr **bad_wr);
-extern int mthca_arbel_post_recv(struct ibv_qp *ibqp, struct _ib_recv_wr *wr,
+int mthca_arbel_post_recv(struct ibv_qp *ibqp, struct _ib_recv_wr *wr,
struct _ib_recv_wr **bad_wr);
-extern int mthca_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
+int mthca_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
ib_qp_type_t type, struct mthca_qp *qp);
-extern struct mthca_qp *mthca_find_qp(struct mthca_context *ctx, uint32_t qpn);
-extern int mthca_store_qp(struct mthca_context *ctx, uint32_t qpn, struct mthca_qp *qp);
-extern void mthca_clear_qp(struct mthca_context *ctx, uint32_t qpn);
-extern int mthca_free_err_wqe(struct mthca_qp *qp, int is_send,
+struct mthca_qp *mthca_find_qp(struct mthca_context *ctx, uint32_t qpn);
+int mthca_store_qp(struct mthca_context *ctx, uint32_t qpn, struct mthca_qp *qp);
+void mthca_clear_qp(struct mthca_context *ctx, uint32_t qpn);
+int mthca_free_err_wqe(struct mthca_qp *qp, int is_send,
int index, int *dbd, uint32_t *new_wqe);
-extern int mthca_create_ah_pre(struct ibv_pd *pd, struct ibv_create_ah *req);
-extern struct ibv_ah *mthca_create_ah_post(struct ibv_pd *pd,
+int mthca_create_ah_pre(struct ibv_pd *pd, struct ibv_create_ah *req);
+struct ibv_ah *mthca_create_ah_post(struct ibv_pd *pd,
struct ibv_ah_attr *attr, struct ibv_create_ah_resp *resp);
-extern int mthca_destroy_ah(struct ibv_ah *ah);
-extern int mthca_alloc_av(struct mthca_pd *pd, struct ibv_ah_attr *attr,
+int mthca_destroy_ah(struct ibv_ah *ah);
+int mthca_alloc_av(struct mthca_pd *pd, struct ibv_ah_attr *attr,
struct mthca_ah *ah, struct ibv_create_ah_resp *resp);
-extern void mthca_free_av(struct mthca_ah *ah);
-extern int mthca_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);
-extern int mthca_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);
+void mthca_free_av(struct mthca_ah *ah);
+int mthca_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);
+int mthca_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid);
struct ibv_context *mthca_alloc_context(struct ibv_get_context_resp *resp_p);
void mthca_free_context(struct ibv_context *ibctx);
wmb();
((struct mthca_next_seg *) prev_wqe)->ee_nds =
- cl_hton32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
+ cl_hton32((size0 ? 0 : MTHCA_NEXT_DBD) | size |
+ ((wr->send_opt& IB_SEND_OPT_FENCE) ?
+ MTHCA_NEXT_FENCE : 0));
if (!size0) {
size0 = size;
size0 = 0;
}
- if (mthca_wq_overflow(&qp->rq, nreq, to_mcq(qp->ibv_qp.send_cq))) {
+ if (mthca_wq_overflow(&qp->rq, nreq, to_mcq(qp->ibv_qp.recv_cq))) {
UVP_PRINT(TRACE_LEVEL_ERROR,UVP_DBG_QP,("RQ %06x full (%u head, %u tail,"
" %d max, %d nreq)\n", ibqp->qp_num,
qp->rq.head, qp->rq.tail,
wmb();
((struct mthca_next_seg *) prev_wqe)->ee_nds =
- cl_hton32(MTHCA_NEXT_DBD | size);
+ cl_hton32(MTHCA_NEXT_DBD | size |
+ ((wr->send_opt & IB_SEND_OPT_FENCE) ?
+ MTHCA_NEXT_FENCE : 0));
if (!size0) {
size0 = size;
if(ibqp->state == IBV_QPS_RESET)
return -EBUSY;
for (nreq = 0; wr; ++nreq, wr = wr->p_next) {
- if (mthca_wq_overflow(&qp->rq, nreq, to_mcq(qp->ibv_qp.send_cq))) {//TODO sleybo: check the cq
+ if (mthca_wq_overflow(&qp->rq, nreq, to_mcq(qp->ibv_qp.recv_cq))) { // TODO sleybo: check the cq
UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("RQ %06x full (%u head, %u tail,"
" %d max, %d nreq)\n", ibqp->qp_num,
qp->rq.head, qp->rq.tail,