drm/i915: Convert active API to VMA
author Ben Widawsky <ben@bwidawsk.net>
Tue, 24 Sep 2013 16:57:58 +0000 (09:57 -0700)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Tue, 1 Oct 2013 05:45:21 +0000 (07:45 +0200)
Even though we track activity per object rather than per VMA, the
active_list is per-VM, so it makes the most sense to use VMAs in the
APIs.

NOTE: Daniel intends to eventually rip out active/inactive LRUs, but for
now, leave them be.
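
To make the conversion concrete, the hunks below replace an open-coded
pair of calls with the new VMA-based helper. A minimal sketch of the
caller-side change, using only the identifiers that appear in this diff:

	/* Before: callers moved the VMA onto its VM's active list by hand
	 * and then marked the backing object active on the ring.
	 */
	list_move_tail(&vma->mm_list, &vma->vm->active_list);
	i915_gem_object_move_to_active(obj, ring);

	/* After: a single VMA-based call does both steps, and
	 * i915_gem_object_move_to_active() becomes static to i915_gem.c.
	 */
	i915_vma_move_to_active(vma, ring);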

v2: Remove a leftover hunk from the previous patch, which didn't keep
i915_gem_object_move_to_active. That patch had to rely on the ring
instead of the obj to get the dev. (Chris)

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c

index a3d43b25e1d9ea84cf7fd93d41ddbeb2ffd9c54e..7ccb0cd35474365be7424093f910c895283fff1a 100644 (file)
@@ -1905,9 +1905,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
                         struct intel_ring_buffer *to);
-void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-                                   struct intel_ring_buffer *ring);
-
+void i915_vma_move_to_active(struct i915_vma *vma,
+                            struct intel_ring_buffer *ring);
 int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
index 0d6b012b4f605576b1bcc95960dd1d3107051fa7..1a0b74d56cd7976a654e3839f062e0e794314f86 100644 (file)
@@ -1917,7 +1917,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        return 0;
 }
 
-void
+static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                               struct intel_ring_buffer *ring)
 {
@@ -1956,6 +1956,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
        }
 }
 
+void i915_vma_move_to_active(struct i915_vma *vma,
+                            struct intel_ring_buffer *ring)
+{
+       list_move_tail(&vma->mm_list, &vma->vm->active_list);
+       return i915_gem_object_move_to_active(vma->obj, ring);
+}
+
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
index 9af3fe7e42b0297608eddca5cc335666719bf8ce..1a877a547290a52f53d7df9c5116fb2e758e604c 100644 (file)
@@ -453,11 +453,8 @@ static int do_switch(struct i915_hw_context *to)
         * MI_SET_CONTEXT instead of when the next seqno has completed.
         */
        if (from != NULL) {
-               struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
-               struct i915_address_space *ggtt = &dev_priv->gtt.base;
                from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-               list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
-               i915_gem_object_move_to_active(from->obj, ring);
+               i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
                /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
                 * whole damn pipeline, we don't need to explicitly mark the
                 * object dirty. The only exception is that the context must be
index da23cfe3902b3bdd06e9ddb902daca30ca375614..0ce0d47e4b0ffbafda92966e1916b732f5b84968 100644 (file)
@@ -872,8 +872,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                obj->base.read_domains = obj->base.pending_read_domains;
                obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
-               list_move_tail(&vma->mm_list, &vma->vm->active_list);
-               i915_gem_object_move_to_active(obj, ring);
+               i915_vma_move_to_active(vma, ring);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
                        obj->last_write_seqno = intel_ring_get_seqno(ring);