git.openfabrics.org - ~emulex/infiniband.git/commitdiff
block: extend queue bypassing to cover blkcg policies
author	Tejun Heo <tj@kernel.org>
Mon, 5 Mar 2012 21:14:59 +0000 (13:14 -0800)
committer	Jens Axboe <axboe@kernel.dk>
Tue, 6 Mar 2012 20:27:22 +0000 (21:27 +0100)
Extend queue bypassing such that a dying queue is always bypassing and
blk-throttle is drained on bypass.  With blkcg policies updated to
test blk_queue_bypass() instead of blk_queue_dead(), this ensures that
no bio or request is held by or going through blkcg policies on a
bypassing queue.

This will be used to implement blkg cleanup on elevator switches and
policy changes.
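
As a rough illustration of the rule this change establishes, a blkcg policy
hook now bails out whenever the queue is bypassing, not only when it is dead.
The sketch below is hypothetical (example_policy_should_account() is not a
real kernel symbol); only blk_queue_bypass(), introduced alongside this
change, is assumed from the kernel.

#include <linux/blkdev.h>

/* Hypothetical policy-side check; not a real kernel symbol. */
static bool example_policy_should_account(struct request_queue *q)
{
	/*
	 * A bypassing queue -- which now includes every dying queue --
	 * must not have bios or requests held by blkcg policies.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return false;

	return true;
}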

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-core.c
block/blk-throttle.c

diff --git a/block/blk-core.c b/block/blk-core.c
index 98ddef43009333d1ad81413e267a776862c8530a..7713c73d9590454adccc3d6855007e58ea383f94 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -372,8 +372,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                if (q->elevator)
                        elv_drain_elevator(q);
 
-               if (drain_all)
-                       blk_throtl_drain(q);
+               blk_throtl_drain(q);
 
                /*
                 * This function might be called on a queue which failed
@@ -415,8 +414,8 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
  *
  * In bypass mode, only the dispatch FIFO queue of @q is used.  This
  * function makes @q enter bypass mode and drains all requests which were
- * issued before.  On return, it's guaranteed that no request has ELVPRIV
- * set.
+ * throttled or issued before.  On return, it's guaranteed that no request
+ * is being throttled or has ELVPRIV set.
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
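
For context, the intended caller pattern pairs blk_queue_bypass_start() with
blk_queue_bypass_end() around whatever update must not race with blkcg
policies.  The helper below is a minimal sketch, not code from this commit;
it assumes the bypass helpers are visible to the caller (they are
block-layer-internal in this series).

#include <linux/blkdev.h>
#include "blk.h"	/* assumption: bypass helpers declared block-internal here */

/* Hypothetical wrapper illustrating the pairing; not a real kernel symbol. */
static void example_update_under_bypass(struct request_queue *q,
					void (*update)(struct request_queue *))
{
	blk_queue_bypass_start(q);	/* enter bypass, drain throttled/issued requests */
	update(q);			/* e.g. blkg cleanup on elevator switch or policy change */
	blk_queue_bypass_end(q);	/* leave bypass once the update is done */
}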
@@ -461,6 +460,11 @@ void blk_cleanup_queue(struct request_queue *q)
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 
        spin_lock_irq(lock);
+
+       /* dead queue is permanently in bypass mode till released */
+       q->bypass_depth++;
+       queue_flag_set(QUEUE_FLAG_BYPASS, q);
+
        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        queue_flag_set(QUEUE_FLAG_DEAD, q);
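
The unmatched q->bypass_depth++ above is what keeps a dead queue in bypass
mode permanently: the end side only clears the flag when the depth drops back
to zero.  Roughly, the counterpart introduced alongside this change behaves
like the following paraphrase (from the companion patch, not a hunk of this
commit):

void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}

Since blk_cleanup_queue() never calls the end side for its increment, the
depth can never reach zero again and QUEUE_FLAG_BYPASS stays set until the
queue is released.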
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5eed6a76721d1d78e8105f4bbde30e41f20a66cd..702c0e64e09fd00921b68006fc6cf5e6da9bf2e8 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -310,7 +310,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
        struct request_queue *q = td->queue;
 
        /* no throttling for dead queue */
-       if (unlikely(blk_queue_dead(q)))
+       if (unlikely(blk_queue_bypass(q)))
                return NULL;
 
        rcu_read_lock();
@@ -335,7 +335,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
        spin_lock_irq(q->queue_lock);
 
        /* Make sure @q is still alive */
-       if (unlikely(blk_queue_dead(q))) {
+       if (unlikely(blk_queue_bypass(q))) {
                kfree(tg);
                return NULL;
        }
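
The second hunk is the important one: throtl_get_tg() drops the queue lock to
allocate, so it must re-check the queue state after re-acquiring the lock,
and that re-check is now blk_queue_bypass() as well.  A minimal sketch of the
same pattern, with hypothetical example_* names (only blk_queue_bypass() and
the allocator calls are real kernel interfaces):

#include <linux/blkdev.h>
#include <linux/slab.h>

struct example_group {
	int weight;
};

/* Hypothetical: allocate a per-queue group, queue_lock held on entry and exit. */
static struct example_group *example_get_group(struct request_queue *q)
{
	struct example_group *grp;

	/* the allocation may sleep, so the queue lock has to be dropped */
	spin_unlock_irq(q->queue_lock);
	grp = kzalloc(sizeof(*grp), GFP_NOIO);
	spin_lock_irq(q->queue_lock);

	/* the queue may have started bypassing while the lock was dropped */
	if (!grp || unlikely(blk_queue_bypass(q))) {
		kfree(grp);
		return NULL;
	}
	return grp;
}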