git.openfabrics.org - ~emulex/infiniband.git/commitdiff
drm/i915: Merge ring flushing and lazy requests
authorChris Wilson <chris@chris-wilson.co.uk>
Mon, 20 Sep 2010 11:50:23 +0000 (12:50 +0100)
committerChris Wilson <chris@chris-wilson.co.uk>
Tue, 21 Sep 2010 10:24:16 +0000 (11:24 +0100)
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_display.c

index 6e22be4f3585027207e4a52a4ac1ac27348f98da..37a44c80efd2fe1ea605cfae4062c47caa96b28a 100644 (file)
@@ -1003,6 +1003,7 @@ void i915_gem_reset_flushing_list(struct drm_device *dev);
 void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 void i915_gem_flush_ring(struct drm_device *dev,
+                        struct drm_file *file_priv,
                         struct intel_ring_buffer *ring,
                         uint32_t invalidate_domains,
                         uint32_t flush_domains);
index a5d5751bad30187bf1a147dee163cdc3131a62c7..58baecc821a52523c793eb58b9ae253c46cfd953 100644 (file)
@@ -1910,16 +1910,23 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno,
 
 void
 i915_gem_flush_ring(struct drm_device *dev,
+                   struct drm_file *file_priv,
                    struct intel_ring_buffer *ring,
                    uint32_t invalidate_domains,
                    uint32_t flush_domains)
 {
        ring->flush(dev, ring, invalidate_domains, flush_domains);
        i915_gem_process_flushing_list(dev, flush_domains, ring);
+
+       if (ring->outstanding_lazy_request) {
+               (void)i915_add_request(dev, file_priv, NULL, ring);
+               ring->outstanding_lazy_request = false;
+       }
 }
 
 static void
 i915_gem_flush(struct drm_device *dev,
+              struct drm_file *file_priv,
               uint32_t invalidate_domains,
               uint32_t flush_domains,
               uint32_t flush_rings)
@@ -1931,11 +1938,11 @@ i915_gem_flush(struct drm_device *dev,
 
        if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
                if (flush_rings & RING_RENDER)
-                       i915_gem_flush_ring(dev,
+                       i915_gem_flush_ring(dev, file_priv,
                                            &dev_priv->render_ring,
                                            invalidate_domains, flush_domains);
                if (flush_rings & RING_BSD)
-                       i915_gem_flush_ring(dev,
+                       i915_gem_flush_ring(dev, file_priv,
                                            &dev_priv->bsd_ring,
                                            invalidate_domains, flush_domains);
        }
@@ -2054,6 +2061,7 @@ i915_gpu_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool lists_empty;
+       u32 seqno;
        int ret;
 
        lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
@@ -2064,24 +2072,18 @@ i915_gpu_idle(struct drm_device *dev)
                return 0;
 
        /* Flush everything onto the inactive list. */
-       i915_gem_flush_ring(dev,
-                           &dev_priv->render_ring,
+       seqno = i915_gem_next_request_seqno(dev, &dev_priv->render_ring);
+       i915_gem_flush_ring(dev, NULL, &dev_priv->render_ring,
                            I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-
-       ret = i915_wait_request(dev,
-                               i915_gem_next_request_seqno(dev, &dev_priv->render_ring),
-                               &dev_priv->render_ring);
+       ret = i915_wait_request(dev, seqno, &dev_priv->render_ring);
        if (ret)
                return ret;
 
        if (HAS_BSD(dev)) {
-               i915_gem_flush_ring(dev,
-                                   &dev_priv->bsd_ring,
+               seqno = i915_gem_next_request_seqno(dev, &dev_priv->bsd_ring);
+               i915_gem_flush_ring(dev, NULL, &dev_priv->bsd_ring,
                                    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-
-               ret = i915_wait_request(dev,
-                                       i915_gem_next_request_seqno(dev, &dev_priv->bsd_ring),
-                                       &dev_priv->bsd_ring);
+               ret = i915_wait_request(dev, seqno, &dev_priv->bsd_ring);
                if (ret)
                        return ret;
        }
@@ -2651,7 +2653,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
 
        /* Queue the GPU write cache flushing we need. */
        old_write_domain = obj->write_domain;
-       i915_gem_flush_ring(dev,
+       i915_gem_flush_ring(dev, NULL,
                            to_intel_bo(obj)->ring,
                            0, obj->write_domain);
        BUG_ON(obj->write_domain);
@@ -2780,7 +2782,7 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
        i915_gem_object_flush_cpu_write_domain(obj);
 
        old_read_domains = obj->read_domains;
-       obj->read_domains = I915_GEM_DOMAIN_GTT;
+       obj->read_domains |= I915_GEM_DOMAIN_GTT;
 
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
@@ -2837,7 +2839,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
         * need to be invalidated at next use.
         */
        if (write) {
-               obj->read_domains &= I915_GEM_DOMAIN_CPU;
+               obj->read_domains = I915_GEM_DOMAIN_CPU;
                obj->write_domain = I915_GEM_DOMAIN_CPU;
        }
 
@@ -3762,21 +3764,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                         dev->invalidate_domains,
                         dev->flush_domains);
 #endif
-               i915_gem_flush(dev,
+               i915_gem_flush(dev, file_priv,
                               dev->invalidate_domains,
                               dev->flush_domains,
                               dev_priv->mm.flush_rings);
        }
 
-       if (dev_priv->render_ring.outstanding_lazy_request) {
-               (void)i915_add_request(dev, file_priv, NULL, &dev_priv->render_ring);
-               dev_priv->render_ring.outstanding_lazy_request = false;
-       }
-       if (dev_priv->bsd_ring.outstanding_lazy_request) {
-               (void)i915_add_request(dev, file_priv, NULL, &dev_priv->bsd_ring);
-               dev_priv->bsd_ring.outstanding_lazy_request = false;
-       }
-
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
                struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -4232,12 +4225,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                 * use this buffer rather sooner than later, so issuing the required
                 * flush earlier is beneficial.
                 */
-               if (obj->write_domain & I915_GEM_GPU_DOMAINS) {
-                       i915_gem_flush_ring(dev,
+               if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+                       i915_gem_flush_ring(dev, file_priv,
                                            obj_priv->ring,
                                            0, obj->write_domain);
-                       (void)i915_add_request(dev, file_priv, NULL, obj_priv->ring);
-               }
 
                /* Update the active list for the hardware's current position.
                 * Otherwise this only updates on a delayed timer or when irqs
index 0505ddb76a10992524e73c7285a7350a5feb718f..791374c888da41d2090a05296f413e3a24374efe 100644 (file)
@@ -5058,7 +5058,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
        /* Schedule the pipelined flush */
        if (was_dirty)
-               i915_gem_flush_ring(dev, obj_priv->ring, 0, was_dirty);
+               i915_gem_flush_ring(dev, NULL, obj_priv->ring, 0, was_dirty);
 
        if (IS_GEN3(dev) || IS_GEN2(dev)) {
                u32 flip_mask;