drm/i915: Remove the deferred-free list
author     Chris Wilson <chris@chris-wilson.co.uk>
           Tue, 24 Apr 2012 14:47:31 +0000 (15:47 +0100)
committer  Daniel Vetter <daniel.vetter@ffwll.ch>
           Thu, 3 May 2012 09:18:11 +0000 (11:18 +0200)
The use of the mm_list by deferred-free breaks the following patches to
extend the range of objects tracked. We can simplify things if we just
make the unbind during free uninterruptible.

Note that unbinding should never fail, because we hold an additional
reference on every active object. Only the ilk vt-d workaround breaks
this, but already takes care of not failing by waiting for the gpu to
quiescent non-interruptible. But the existence of the deferred free
list casted some doubts on this theory, hence WARN if the unbind fails
and only then retry non-interruptible.

We can kill this additional code after a release, provided the theory is
indeed right and no one has hit that WARN.
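
For illustration, here is a minimal standalone C sketch of the fallback
pattern this patch adds to i915_gem_free_object(): attempt the interruptible
unbind, WARN if it unexpectedly fails, then force the interruptible flag off,
retry, and restore the flag. struct fake_dev_priv and try_unbind() are made-up
stand-ins for dev_priv->mm and i915_gem_object_unbind(); the real change is in
the i915_gem.c hunk below.

#include <stdbool.h>
#include <stdio.h>

#define ERESTARTSYS 512	/* kernel-internal errno, not exported to userspace */

struct fake_dev_priv {
	bool interruptible;	/* models dev_priv->mm.interruptible */
};

/* Stand-in for i915_gem_object_unbind(): pretend a signal interrupts the
 * wait whenever the interruptible flag is set. */
static int try_unbind(struct fake_dev_priv *dev_priv)
{
	return dev_priv->interruptible ? -ERESTARTSYS : 0;
}

static void free_object(struct fake_dev_priv *dev_priv)
{
	if (try_unbind(dev_priv) == -ERESTARTSYS) {
		bool was_interruptible;

		/* Not expected to happen (active objects hold a reference),
		 * hence the WARN; fall back to a non-interruptible unbind so
		 * that freeing the object cannot fail. */
		fprintf(stderr, "WARN: interruptible unbind failed, retrying\n");

		was_interruptible = dev_priv->interruptible;
		dev_priv->interruptible = false;

		if (try_unbind(dev_priv))
			fprintf(stderr, "WARN: non-interruptible unbind failed\n");

		dev_priv->interruptible = was_interruptible;
	}
	/* ...the rest of the object teardown (mmap offset, kfree) follows */
}

int main(void)
{
	struct fake_dev_priv dev_priv = { .interruptible = true };

	free_object(&dev_priv);
	return 0;
}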

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index ecf746837b23f24230c68a85388e3f40f209c85f..120db4687a2f6eafd5b2218068298242121bc0cf 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -47,7 +47,6 @@ enum {
        FLUSHING_LIST,
        INACTIVE_LIST,
        PINNED_LIST,
-       DEFERRED_FREE_LIST,
 };
 
 static const char *yesno(int v)
@@ -182,10 +181,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
                seq_printf(m, "Flushing:\n");
                head = &dev_priv->mm.flushing_list;
                break;
-       case DEFERRED_FREE_LIST:
-               seq_printf(m, "Deferred free:\n");
-               head = &dev_priv->mm.deferred_free_list;
-               break;
        default:
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
@@ -252,11 +247,6 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
        seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);
 
-       size = count = mappable_size = mappable_count = 0;
-       count_objects(&dev_priv->mm.deferred_free_list, mm_list);
-       seq_printf(m, "  %u [%u] freed objects, %zu [%zu] bytes\n",
-                  count, mappable_count, size, mappable_size);
-
        size = count = mappable_size = mappable_count = 0;
        list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
                if (obj->fault_mappable) {
@@ -1840,7 +1830,6 @@ static struct drm_info_list i915_debugfs_list[] = {
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
        {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
-       {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 560ce7f44a3ba5b3f55caa1fb708eebca6fd96d9..21127aff3900b6bf350b65b08904cd435328bfc0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -692,14 +692,6 @@ typedef struct drm_i915_private {
                /** LRU list of objects with fence regs on them. */
                struct list_head fence_list;
 
-               /**
-                * List of objects currently pending being freed.
-                *
-                * These objects are no longer in use, but due to a signal
-                * we were prevented from freeing them at the appointed time.
-                */
-               struct list_head deferred_free_list;
-
                /**
                 * We leave the user IRQ off as much as possible,
                 * but this means that requests will finish and never
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bf4683c9aed85ded7e154e8af1e23afbc5dc1baa..9bf11c8fe920d27e9e3ce970fe509bb2487716a0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,7 +46,6 @@ static int i915_gem_phys_pwrite(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file);
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
 
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj);
@@ -1782,20 +1781,6 @@ i915_gem_retire_requests(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
 
-       if (!list_empty(&dev_priv->mm.deferred_free_list)) {
-           struct drm_i915_gem_object *obj, *next;
-
-           /* We must be careful that during unbind() we do not
-            * accidentally infinitely recurse into retire requests.
-            * Currently:
-            *   retire -> free -> unbind -> wait -> retire_ring
-            */
-           list_for_each_entry_safe(obj, next,
-                                    &dev_priv->mm.deferred_free_list,
-                                    mm_list)
-                   i915_gem_free_object_tail(obj);
-       }
-
        for (i = 0; i < I915_NUM_RINGS; i++)
                i915_gem_retire_requests_ring(&dev_priv->ring[i]);
 }
@@ -2067,7 +2052,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        }
 
        ret = i915_gem_object_finish_gpu(obj);
-       if (ret == -ERESTARTSYS)
+       if (ret)
                return ret;
        /* Continue on if we fail due to EIO, the GPU is hung so we
         * should be safe and we need to cleanup or else we might
@@ -2094,7 +2079,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 
        /* release the fence reg _after_ flushing */
        ret = i915_gem_object_put_fence(obj);
-       if (ret == -ERESTARTSYS)
+       if (ret)
                return ret;
 
        trace_i915_gem_object_unbind(obj);
@@ -3377,21 +3362,29 @@ int i915_gem_init_object(struct drm_gem_object *obj)
        return 0;
 }
 
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
+       struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       int ret;
-
-       ret = i915_gem_object_unbind(obj);
-       if (ret == -ERESTARTSYS) {
-               list_move(&obj->mm_list,
-                         &dev_priv->mm.deferred_free_list);
-               return;
-       }
 
        trace_i915_gem_object_destroy(obj);
 
+       if (obj->phys_obj)
+               i915_gem_detach_phys_object(dev, obj);
+
+       obj->pin_count = 0;
+       if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
+               bool was_interruptible;
+
+               was_interruptible = dev_priv->mm.interruptible;
+               dev_priv->mm.interruptible = false;
+
+               WARN_ON(i915_gem_object_unbind(obj));
+
+               dev_priv->mm.interruptible = was_interruptible;
+       }
+
        if (obj->base.map_list.map)
                drm_gem_free_mmap_offset(&obj->base);
 
@@ -3402,18 +3395,6 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
        kfree(obj);
 }
 
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
-{
-       struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-       struct drm_device *dev = obj->base.dev;
-
-       if (obj->phys_obj)
-               i915_gem_detach_phys_object(dev, obj);
-
-       obj->pin_count = 0;
-       i915_gem_free_object_tail(obj);
-}
-
 int
 i915_gem_idle(struct drm_device *dev)
 {
@@ -3679,7 +3660,6 @@ i915_gem_load(struct drm_device *dev)
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-       INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
        INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
        for (i = 0; i < I915_NUM_RINGS; i++)
                init_ring_lists(&dev_priv->ring[i]);