From c58cf4f1082d81c42be89e9c6eeca6466954a70c Mon Sep 17 00:00:00 2001
From: Ben Widawsky
Date: Thu, 26 Apr 2012 16:03:02 -0700
Subject: [PATCH] drm/i915: drop polled waits from i915_wait_request

The only time irq_get should fail is during unload or suspend. Both of
these points should try to quiesce the GPU before disabling interrupts,
and so the atomic polling should never occur.

This was recommended by Chris Wilson as a way of reducing the added
complexity of the polled wait which I introduced in an RFC patch.

09:57 < ickle_> it's only there as a fudge for waiting after irqs
after uninstalled during s&r, we aren't actually meant to hit it
09:57 < ickle_> so maybe we should just kill the code there and fix the
breakage

v2: return -ENODEV instead of -EBUSY when irq_get fails

Signed-off-by: Ben Widawsky
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/i915_gem.c | 25 +++++++++++--------------
 1 file changed, 11 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 292b4a9e7ba..148e04baceb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1864,22 +1864,19 @@ i915_wait_request(struct intel_ring_buffer *ring,
 	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
 		trace_i915_gem_request_wait_begin(ring, seqno);
 
-		if (ring->irq_get(ring)) {
-			if (dev_priv->mm.interruptible)
-				ret = wait_event_interruptible(ring->irq_queue,
-					i915_seqno_passed(ring->get_seqno(ring), seqno)
-					|| atomic_read(&dev_priv->mm.wedged));
-			else
-				wait_event(ring->irq_queue,
-					i915_seqno_passed(ring->get_seqno(ring), seqno)
-					|| atomic_read(&dev_priv->mm.wedged));
+		if (WARN_ON(!ring->irq_get(ring)))
+			return -ENODEV;
 
-			ring->irq_put(ring);
-		} else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
-					seqno) ||
-				atomic_read(&dev_priv->mm.wedged), 3000))
-			ret = -EBUSY;
+		if (dev_priv->mm.interruptible)
+			ret = wait_event_interruptible(ring->irq_queue,
+				i915_seqno_passed(ring->get_seqno(ring), seqno)
+				|| atomic_read(&dev_priv->mm.wedged));
+		else
+			wait_event(ring->irq_queue,
+				i915_seqno_passed(ring->get_seqno(ring), seqno)
+				|| atomic_read(&dev_priv->mm.wedged));
+		ring->irq_put(ring);
 
 		trace_i915_gem_request_wait_end(ring, seqno);
 	}
 	if (atomic_read(&dev_priv->mm.wedged))
-- 
2.41.0