block: Rename queue dead flag
author Bart Van Assche <bvanassche@acm.org>
Wed, 28 Nov 2012 12:42:38 +0000 (13:42 +0100)
committer Jens Axboe <axboe@kernel.dk>
Thu, 6 Dec 2012 13:30:58 +0000 (14:30 +0100)
QUEUE_FLAG_DEAD is used to indicate that queuing new requests must
stop. After this flag has been set, queue draining starts. However,
during the queue draining phase it is still safe to invoke the
queue's request_fn, so QUEUE_FLAG_DYING is a better name for this
flag.
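
Condensed from the blk_cleanup_queue() and __get_request() hunks below, the
ordering this describes looks roughly as follows (a sketch only; locking and
the other queue flags are omitted):

	/* teardown side: mark the queue dying, then drain it */
	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);	/* new requests now fail */
	blk_drain_queue(q, true);	/* request_fn may still be invoked here */

	/* submission side (__get_request() and friends) checks the flag */
	if (unlikely(blk_queue_dying(q)))
		return NULL;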

This patch has been generated by running the following command
over the kernel source tree:

git grep -lEw 'blk_queue_dead|QUEUE_FLAG_DEAD' |
    xargs sed -i.tmp -e 's/blk_queue_dead/blk_queue_dying/g'      \
        -e 's/QUEUE_FLAG_DEAD/QUEUE_FLAG_DYING/g';                \
sed -i.tmp -e "s/QUEUE_FLAG_DYING$(printf \\t)*5/QUEUE_FLAG_DYING$(printf \\t)5/g" \
    include/linux/blkdev.h;                                       \
sed -i.tmp -e 's/ DEAD/ DYING/g' -e 's/dead queue/a dying queue/' \
    -e 's/Dead queue/A dying queue/' block/blk-core.c
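
As a sanity check (not part of the patch, and assuming it is run from the
top of the kernel tree), repeating the search without the substitution
should now produce no output:

git grep -nEw 'blk_queue_dead|QUEUE_FLAG_DEAD'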

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: James Bottomley <JBottomley@Parallels.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Chanho Min <chanho.min@lge.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-cgroup.c
block/blk-core.c
block/blk-exec.c
block/blk-sysfs.c
block/blk-throttle.c
block/blk.h
drivers/scsi/scsi_lib.c
include/linux/blkdev.h

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d0b770391ad400ad0312049c84a2b522bf985bf1..5dea4e8dbc55e97cb6becee1431b9dbdaa9ac9da 100644 (file)
@@ -231,7 +231,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
-               return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+               return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
        return __blkg_lookup_create(blkcg, q, NULL);
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
diff --git a/block/blk-core.c b/block/blk-core.c
index ee0e5cafa85954e036234ffda5ed747428d05c89..1a95272cca505837bbb656617320063b5500b74a 100644 (file)
@@ -473,20 +473,20 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
- * Mark @q DEAD, drain all pending requests, destroy and put it.  All
+ * Mark @q DYING, drain all pending requests, destroy and put it.  All
  * future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
        spinlock_t *lock = q->queue_lock;
 
-       /* mark @q DEAD, no new request or merges will be allowed afterwards */
+       /* mark @q DYING, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
-       queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
+       queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
        spin_lock_irq(lock);
 
        /*
-        * Dead queue is permanently in bypass mode till released.  Note
+        * A dying queue is permanently in bypass mode till released.  Note
         * that, unlike blk_queue_bypass_start(), we aren't performing
         * synchronize_rcu() after entering bypass mode to avoid the delay
         * as some drivers create and destroy a lot of queues while
@@ -499,11 +499,11 @@ void blk_cleanup_queue(struct request_queue *q)
 
        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
-       queue_flag_set(QUEUE_FLAG_DEAD, q);
+       queue_flag_set(QUEUE_FLAG_DYING, q);
        spin_unlock_irq(lock);
        mutex_unlock(&q->sysfs_lock);
 
-       /* drain all requests queued before DEAD marking */
+       /* drain all requests queued before DYING marking */
        blk_drain_queue(q, true);
 
        /* @q won't process any more request, flush async actions */
@@ -716,7 +716,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
 
 bool blk_get_queue(struct request_queue *q)
 {
-       if (likely(!blk_queue_dead(q))) {
+       if (likely(!blk_queue_dying(q))) {
                __blk_get_queue(q);
                return true;
        }
@@ -870,7 +870,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
        const bool is_sync = rw_is_sync(rw_flags) != 0;
        int may_queue;
 
-       if (unlikely(blk_queue_dead(q)))
+       if (unlikely(blk_queue_dying(q)))
                return NULL;
 
        may_queue = elv_may_queue(q, rw_flags);
@@ -1050,7 +1050,7 @@ retry:
        if (rq)
                return rq;
 
-       if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
+       if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
                blk_put_rl(rl);
                return NULL;
        }
@@ -1910,7 +1910,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
                return -EIO;
 
        spin_lock_irqsave(q->queue_lock, flags);
-       if (unlikely(blk_queue_dead(q))) {
+       if (unlikely(blk_queue_dying(q))) {
                spin_unlock_irqrestore(q->queue_lock, flags);
                return -ENODEV;
        }
@@ -2885,9 +2885,9 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
        trace_block_unplug(q, depth, !from_schedule);
 
        /*
-        * Don't mess with dead queue.
+        * Don't mess with a dying queue.
         */
-       if (unlikely(blk_queue_dead(q))) {
+       if (unlikely(blk_queue_dying(q))) {
                spin_unlock(q->queue_lock);
                return;
        }
@@ -2996,7 +2996,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                /*
                 * Short-circuit if @q is dead
                 */
-               if (unlikely(blk_queue_dead(q))) {
+               if (unlikely(blk_queue_dying(q))) {
                        __blk_end_request_all(rq, -ENODEV);
                        continue;
                }
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 8b6dc5bd4dd061336172b144ae8c09da6bbc56f8..4aec98df7ba5be7fbed41f0a025fb5c177c614d6 100644 (file)
@@ -60,7 +60,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
        spin_lock_irq(q->queue_lock);
 
-       if (unlikely(blk_queue_dead(q))) {
+       if (unlikely(blk_queue_dying(q))) {
                rq->errors = -ENXIO;
                if (rq->end_io)
                        rq->end_io(rq, rq->errors);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index ce6204608822462c18bda72c7ca2403fd17ec5eb..788147797a798b0041e8d3fa3bf70955016b0915 100644 (file)
@@ -466,7 +466,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
-       if (blk_queue_dead(q)) {
+       if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
@@ -488,7 +488,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 
        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
-       if (blk_queue_dead(q)) {
+       if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a9664fa0b6097ace6a48d48e0992a6d83ed32a9d..31146225f3d078fb5326e30195a0fa138f1f3e5a 100644 (file)
@@ -302,7 +302,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
                /* if %NULL and @q is alive, fall back to root_tg */
                if (!IS_ERR(blkg))
                        tg = blkg_to_tg(blkg);
-               else if (!blk_queue_dead(q))
+               else if (!blk_queue_dying(q))
                        tg = td_root_tg(td);
        }
 
diff --git a/block/blk.h b/block/blk.h
index ca51543b248ca16f0eb28e937ec321e52f3130c8..2218a8a78292a8dd510e0543900d37a875d65cc5 100644 (file)
@@ -96,7 +96,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
                        q->flush_queue_delayed = 1;
                        return NULL;
                }
-               if (unlikely(blk_queue_dead(q)) ||
+               if (unlikely(blk_queue_dying(q)) ||
                    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
                        return NULL;
        }
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index da36a3a81a9ee2206f753a04d4fbbe91b883e00a..f29a1a9b54d247c6568764679d3eb8d44cfe5a2a 100644 (file)
@@ -1406,7 +1406,7 @@ static int scsi_lld_busy(struct request_queue *q)
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
 
-       if (blk_queue_dead(q))
+       if (blk_queue_dying(q))
                return 0;
 
        shost = sdev->host;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1756001210d23a7f32037a3bfb21579202c8c380..aba8246afe72c5b870474e855d0d0c5eb5713fad 100644 (file)
@@ -437,7 +437,7 @@ struct request_queue {
 #define QUEUE_FLAG_STOPPED     2       /* queue is stopped */
 #define        QUEUE_FLAG_SYNCFULL     3       /* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL   4       /* write queue has been filled */
-#define QUEUE_FLAG_DEAD                5       /* queue being torn down */
+#define QUEUE_FLAG_DYING       5       /* queue being torn down */
 #define QUEUE_FLAG_BYPASS      6       /* act as dumb FIFO queue */
 #define QUEUE_FLAG_BIDI                7       /* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES     8      /* disable merge attempts */
@@ -521,7 +521,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define blk_queue_tagged(q)    test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)   test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
-#define blk_queue_dead(q)      test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+#define blk_queue_dying(q)     test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
 #define blk_queue_bypass(q)    test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_nomerges(q)  test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q) \