Diffstat (limited to 'block/blk-core.c')
| -rw-r--r-- | block/blk-core.c | 1337 |
1 file changed, 878 insertions, 459 deletions
diff --git a/block/blk-core.c b/block/blk-core.c index 3a78b00edd7..6f8dba161bf 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -16,6 +16,7 @@ #include <linux/backing-dev.h> #include <linux/bio.h> #include <linux/blkdev.h> +#include <linux/blk-mq.h> #include <linux/highmem.h> #include <linux/mm.h> #include <linux/kernel_stat.h> @@ -29,22 +30,28 @@ #include <linux/fault-inject.h> #include <linux/list_sort.h> #include <linux/delay.h> +#include <linux/ratelimit.h> +#include <linux/pm_runtime.h> #define CREATE_TRACE_POINTS #include <trace/events/block.h> #include "blk.h" +#include "blk-cgroup.h" +#include "blk-mq.h" EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); +EXPORT_TRACEPOINT_SYMBOL_GPL(block_split); +EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug); DEFINE_IDA(blk_queue_ida); /* * For the allocated request tables */ -static struct kmem_cache *request_cachep; +struct kmem_cache *request_cachep = NULL; /* * For queue allocation @@ -56,42 +63,6 @@ struct kmem_cache *blk_requestq_cachep; */ static struct workqueue_struct *kblockd_workqueue; -static void drive_stat_acct(struct request *rq, int new_io) -{ - struct hd_struct *part; - int rw = rq_data_dir(rq); - int cpu; - - if (!blk_do_io_stat(rq)) - return; - - cpu = part_stat_lock(); - - if (!new_io) { - part = rq->part; - part_stat_inc(cpu, part, merges[rw]); - } else { - part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); - if (!hd_struct_try_get(part)) { - /* - * The partition is already being removed, - * the request will be accounted on the disk only - * - * We take a reference on disk->part0 although that - * partition will never be deleted, so we can treat - * it as any other partition. - */ - part = &rq->rq_disk->part0; - hd_struct_get(part); - } - part_round_stats(cpu, part); - part_inc_in_flight(part, rw); - rq->part = part; - } - - part_stat_unlock(); -} - void blk_queue_congestion_threshold(struct request_queue *q) { int nr; @@ -141,7 +112,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq) rq->cmd = rq->__cmd; rq->cmd_len = BLK_MAX_CDB; rq->tag = -1; - rq->ref_count = 1; rq->start_time = jiffies; set_start_time_ns(rq); rq->part = NULL; @@ -156,23 +126,13 @@ static void req_bio_endio(struct request *rq, struct bio *bio, else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) error = -EIO; - if (unlikely(nbytes > bio->bi_size)) { - printk(KERN_ERR "%s: want %u bytes done, %u left\n", - __func__, nbytes, bio->bi_size); - nbytes = bio->bi_size; - } - if (unlikely(rq->cmd_flags & REQ_QUIET)) set_bit(BIO_QUIET, &bio->bi_flags); - bio->bi_size -= nbytes; - bio->bi_sector += (nbytes >> 9); - - if (bio_integrity(bio)) - bio_integrity_advance(bio, nbytes); + bio_advance(bio, nbytes); /* don't actually finish bio if it's part of flush sequence */ - if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) + if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) bio_endio(bio, error); } @@ -180,15 +140,15 @@ void blk_dump_rq_flags(struct request *rq, char *msg) { int bit; - printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg, + printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg, rq->rq_disk ? 
rq->rq_disk->disk_name : "?", rq->cmd_type, - rq->cmd_flags); + (unsigned long long) rq->cmd_flags); printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); - printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", - rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); + printk(KERN_INFO " bio %p, biotail %p, len %u\n", + rq->bio, rq->biotail, blk_rq_bytes(rq)); if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { printk(KERN_INFO " cdb: "); @@ -217,12 +177,13 @@ static void blk_delay_work(struct work_struct *work) * Description: * Sometimes queueing needs to be postponed for a little while, to allow * resources to come back. This function will make sure that queueing is - * restarted around the specified time. + * restarted around the specified time. Queue lock must be held. */ void blk_delay_queue(struct request_queue *q, unsigned long msecs) { - queue_delayed_work(kblockd_workqueue, &q->delay_work, - msecs_to_jiffies(msecs)); + if (likely(!blk_queue_dead(q))) + queue_delayed_work(kblockd_workqueue, &q->delay_work, + msecs_to_jiffies(msecs)); } EXPORT_SYMBOL(blk_delay_queue); @@ -260,7 +221,7 @@ EXPORT_SYMBOL(blk_start_queue); **/ void blk_stop_queue(struct request_queue *q) { - __cancel_delayed_work(&q->delay_work); + cancel_delayed_work(&q->delay_work); queue_flag_set(QUEUE_FLAG_STOPPED, q); } EXPORT_SYMBOL(blk_stop_queue); @@ -280,17 +241,56 @@ EXPORT_SYMBOL(blk_stop_queue); * * This function does not cancel any asynchronous activity arising * out of elevator or throttling code. That would require elevaotor_exit() - * and blk_throtl_exit() to be called with queue lock initialized. + * and blkcg_exit_queue() to be called with queue lock initialized. * */ void blk_sync_queue(struct request_queue *q) { del_timer_sync(&q->timeout); - cancel_delayed_work_sync(&q->delay_work); + + if (q->mq_ops) { + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) { + cancel_delayed_work_sync(&hctx->run_work); + cancel_delayed_work_sync(&hctx->delay_work); + } + } else { + cancel_delayed_work_sync(&q->delay_work); + } } EXPORT_SYMBOL(blk_sync_queue); /** + * __blk_run_queue_uncond - run a queue whether or not it has been stopped + * @q: The queue to run + * + * Description: + * Invoke request handling on a queue if there are any pending requests. + * May be used to restart request handling after a request has completed. + * This variant runs the queue whether or not the queue has been + * stopped. Must be called with the queue lock held and interrupts + * disabled. See also @blk_run_queue. + */ +inline void __blk_run_queue_uncond(struct request_queue *q) +{ + if (unlikely(blk_queue_dead(q))) + return; + + /* + * Some request_fn implementations, e.g. scsi_request_fn(), unlock + * the queue lock internally. As a result multiple threads may be + * running such a request function concurrently. Keep track of the + * number of active request_fn invocations such that blk_drain_queue() + * can wait until all these request_fn calls have finished. 
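/*
 * Illustrative sketch, not part of this commit: a drain-side helper built on
 * the q->request_fn_active counter introduced above.  It waits until every
 * concurrently running ->request_fn() invocation has returned; the in-tree
 * logic doing this lives in __blk_drain_queue() further down the patch.  The
 * function name is an assumption of the example.
 */
#include <linux/blkdev.h>
#include <linux/delay.h>

static void my_wait_for_request_fn(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	while (q->request_fn_active) {		/* bumped by __blk_run_queue_uncond() */
		spin_unlock_irq(q->queue_lock);
		msleep(10);			/* let the running request_fn finish */
		spin_lock_irq(q->queue_lock);
	}
	spin_unlock_irq(q->queue_lock);
}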
+ */ + q->request_fn_active++; + q->request_fn(q); + q->request_fn_active--; +} + +/** * __blk_run_queue - run a single device queue * @q: The queue to run * @@ -303,7 +303,7 @@ void __blk_run_queue(struct request_queue *q) if (unlikely(blk_queue_stopped(q))) return; - q->request_fn(q); + __blk_run_queue_uncond(q); } EXPORT_SYMBOL(__blk_run_queue); @@ -313,14 +313,12 @@ EXPORT_SYMBOL(__blk_run_queue); * * Description: * Tells kblockd to perform the equivalent of @blk_run_queue on behalf - * of us. + * of us. The caller must hold the queue lock. */ void blk_run_queue_async(struct request_queue *q) { - if (likely(!blk_queue_stopped(q))) { - __cancel_delayed_work(&q->delay_work); - queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); - } + if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q))) + mod_delayed_work(kblockd_workqueue, &q->delay_work, 0); } EXPORT_SYMBOL(blk_run_queue_async); @@ -349,7 +347,7 @@ void blk_put_queue(struct request_queue *q) EXPORT_SYMBOL(blk_put_queue); /** - * blk_drain_queue - drain requests from request_queue + * __blk_drain_queue - drain requests from request_queue * @q: queue to drain * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV * @@ -357,28 +355,38 @@ EXPORT_SYMBOL(blk_put_queue); * If not, only ELVPRIV requests are drained. The caller is responsible * for ensuring that no new requests which need to be drained are queued. */ -void blk_drain_queue(struct request_queue *q, bool drain_all) +static void __blk_drain_queue(struct request_queue *q, bool drain_all) + __releases(q->queue_lock) + __acquires(q->queue_lock) { + int i; + + lockdep_assert_held(q->queue_lock); + while (true) { bool drain = false; - int i; - spin_lock_irq(q->queue_lock); + /* + * The caller might be trying to drain @q before its + * elevator is initialized. + */ + if (q->elevator) + elv_drain_elevator(q); - elv_drain_elevator(q); - if (drain_all) - blk_throtl_drain(q); + blkcg_drain_queue(q); /* * This function might be called on a queue which failed - * driver init after queue creation. Some drivers - * (e.g. fd) get unhappy in such cases. Kick queue iff - * dispatch queue has something on it. + * driver init after queue creation or is not yet fully + * active yet. Some drivers (e.g. fd and loop) get unhappy + * in such cases. Kick queue iff dispatch queue has + * something on it and @q has request_fn set. */ - if (!list_empty(&q->queue_head)) + if (!list_empty(&q->queue_head) && q->request_fn) __blk_run_queue(q); - drain |= q->rq.elvpriv; + drain |= q->nr_rqs_elvpriv; + drain |= q->request_fn_active; /* * Unfortunately, requests are queued at and tracked from @@ -388,88 +396,174 @@ void blk_drain_queue(struct request_queue *q, bool drain_all) if (drain_all) { drain |= !list_empty(&q->queue_head); for (i = 0; i < 2; i++) { - drain |= q->rq.count[i]; + drain |= q->nr_rqs[i]; drain |= q->in_flight[i]; drain |= !list_empty(&q->flush_queue[i]); } } - spin_unlock_irq(q->queue_lock); - if (!drain) break; + + spin_unlock_irq(q->queue_lock); + msleep(10); + + spin_lock_irq(q->queue_lock); + } + + /* + * With queue marked dead, any woken up waiter will fail the + * allocation path, so the wakeup chaining is lost and we're + * left with hung waiters. We need to wake up those waiters. 
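/*
 * Illustrative sketch, not part of this commit: blk_run_queue_async() is now
 * documented as requiring the queue lock, so a completion path that used to
 * call it without locking would wrap it as below.  my_kick_queue() is an
 * assumption of the example.
 */
#include <linux/blkdev.h>

static void my_kick_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_run_queue_async(q);		/* no-op on stopped or dead queues */
	spin_unlock_irqrestore(q->queue_lock, flags);
}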
+ */ + if (q->request_fn) { + struct request_list *rl; + + blk_queue_for_each_rl(rl, q) + for (i = 0; i < ARRAY_SIZE(rl->wait); i++) + wake_up_all(&rl->wait[i]); + } +} + +/** + * blk_queue_bypass_start - enter queue bypass mode + * @q: queue of interest + * + * In bypass mode, only the dispatch FIFO queue of @q is used. This + * function makes @q enter bypass mode and drains all requests which were + * throttled or issued before. On return, it's guaranteed that no request + * is being throttled or has ELVPRIV set and blk_queue_bypass() %true + * inside queue or RCU read lock. + */ +void blk_queue_bypass_start(struct request_queue *q) +{ + bool drain; + + spin_lock_irq(q->queue_lock); + drain = !q->bypass_depth++; + queue_flag_set(QUEUE_FLAG_BYPASS, q); + spin_unlock_irq(q->queue_lock); + + if (drain) { + spin_lock_irq(q->queue_lock); + __blk_drain_queue(q, false); + spin_unlock_irq(q->queue_lock); + + /* ensure blk_queue_bypass() is %true inside RCU read lock */ + synchronize_rcu(); } } +EXPORT_SYMBOL_GPL(blk_queue_bypass_start); + +/** + * blk_queue_bypass_end - leave queue bypass mode + * @q: queue of interest + * + * Leave bypass mode and restore the normal queueing behavior. + */ +void blk_queue_bypass_end(struct request_queue *q) +{ + spin_lock_irq(q->queue_lock); + if (!--q->bypass_depth) + queue_flag_clear(QUEUE_FLAG_BYPASS, q); + WARN_ON_ONCE(q->bypass_depth < 0); + spin_unlock_irq(q->queue_lock); +} +EXPORT_SYMBOL_GPL(blk_queue_bypass_end); /** * blk_cleanup_queue - shutdown a request queue * @q: request queue to shutdown * - * Mark @q DEAD, drain all pending requests, destroy and put it. All - * future requests will be failed immediately with -ENODEV. + * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and + * put it. All future requests will be failed immediately with -ENODEV. */ void blk_cleanup_queue(struct request_queue *q) { spinlock_t *lock = q->queue_lock; - /* mark @q DEAD, no new request or merges will be allowed afterwards */ + /* mark @q DYING, no new request or merges will be allowed afterwards */ mutex_lock(&q->sysfs_lock); - queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); - + queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); spin_lock_irq(lock); - queue_flag_set(QUEUE_FLAG_NOMERGES, q); - queue_flag_set(QUEUE_FLAG_NOXMERGES, q); - queue_flag_set(QUEUE_FLAG_DEAD, q); - if (q->queue_lock != &q->__queue_lock) - q->queue_lock = &q->__queue_lock; + /* + * A dying queue is permanently in bypass mode till released. Note + * that, unlike blk_queue_bypass_start(), we aren't performing + * synchronize_rcu() after entering bypass mode to avoid the delay + * as some drivers create and destroy a lot of queues while + * probing. This is still safe because blk_release_queue() will be + * called only after the queue refcnt drops to zero and nothing, + * RCU or not, would be traversing the queue by then. + */ + q->bypass_depth++; + queue_flag_set(QUEUE_FLAG_BYPASS, q); + queue_flag_set(QUEUE_FLAG_NOMERGES, q); + queue_flag_set(QUEUE_FLAG_NOXMERGES, q); + queue_flag_set(QUEUE_FLAG_DYING, q); spin_unlock_irq(lock); mutex_unlock(&q->sysfs_lock); /* - * Drain all requests queued before DEAD marking. The caller might - * be trying to tear down @q before its elevator is initialized, in - * which case we don't want to call into draining. + * Drain all requests queued before DYING marking. Set DEAD flag to + * prevent that q->request_fn() gets invoked after draining finished. 
*/ - if (q->elevator) - blk_drain_queue(q, true); + if (q->mq_ops) { + blk_mq_drain_queue(q); + spin_lock_irq(lock); + } else { + spin_lock_irq(lock); + __blk_drain_queue(q, true); + } + queue_flag_set(QUEUE_FLAG_DEAD, q); + spin_unlock_irq(lock); /* @q won't process any more request, flush async actions */ del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); blk_sync_queue(q); + spin_lock_irq(lock); + if (q->queue_lock != &q->__queue_lock) + q->queue_lock = &q->__queue_lock; + spin_unlock_irq(lock); + /* @q is and will stay empty, shutdown and put */ blk_put_queue(q); } EXPORT_SYMBOL(blk_cleanup_queue); -static int blk_init_free_list(struct request_queue *q) +int blk_init_rl(struct request_list *rl, struct request_queue *q, + gfp_t gfp_mask) { - struct request_list *rl = &q->rq; - if (unlikely(rl->rq_pool)) return 0; + rl->q = q; rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; - rl->elvpriv = 0; init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, - mempool_free_slab, request_cachep, q->node); - + mempool_free_slab, request_cachep, + gfp_mask, q->node); if (!rl->rq_pool) return -ENOMEM; return 0; } +void blk_exit_rl(struct request_list *rl) +{ + if (rl->rq_pool) + mempool_destroy(rl->rq_pool); +} + struct request_queue *blk_alloc_queue(gfp_t gfp_mask) { - return blk_alloc_queue_node(gfp_mask, -1); + return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE); } EXPORT_SYMBOL(blk_alloc_queue); @@ -483,7 +577,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) if (!q) return NULL; - q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL); + q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask); if (q->id < 0) goto fail_q; @@ -498,14 +592,15 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) if (err) goto fail_id; - if (blk_throtl_init(q)) - goto fail_id; - setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, laptop_mode_timer_fn, (unsigned long) q); setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); + INIT_LIST_HEAD(&q->queue_head); INIT_LIST_HEAD(&q->timeout_list); INIT_LIST_HEAD(&q->icq_list); +#ifdef CONFIG_BLK_CGROUP + INIT_LIST_HEAD(&q->blkg_list); +#endif INIT_LIST_HEAD(&q->flush_queue[0]); INIT_LIST_HEAD(&q->flush_queue[1]); INIT_LIST_HEAD(&q->flush_data_in_flight); @@ -522,8 +617,24 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) */ q->queue_lock = &q->__queue_lock; + /* + * A queue starts its life with bypass turned on to avoid + * unnecessary bypass on/off overhead and nasty surprises during + * init. The initial bypass will be finished when the queue is + * registered by blk_register_queue(). 
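/*
 * Illustrative sketch, not part of this commit: the intended usage of the
 * bypass interface exported above is to bracket operations that must not race
 * with ELVPRIV requests, e.g. an elevator switch or a blkcg policy change.
 * my_reconfigure() and its body are assumptions of the example.
 */
#include <linux/blkdev.h>

static void my_reconfigure(struct request_queue *q)
{
	blk_queue_bypass_start(q);	/* drain and enter bypass mode */

	/* ... change elevator/blkcg state while only the dispatch FIFO
	 * of @q is in use ... */

	blk_queue_bypass_end(q);	/* leave bypass, restore normal queueing */
}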
+ */ + q->bypass_depth = 1; + __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags); + + init_waitqueue_head(&q->mq_freeze_wq); + + if (blkcg_init_queue(q)) + goto fail_bdi; + return q; +fail_bdi: + bdi_destroy(&q->backing_dev_info); fail_id: ida_simple_remove(&blk_queue_ida, q->id); fail_q: @@ -567,7 +678,7 @@ EXPORT_SYMBOL(blk_alloc_queue_node); struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) { - return blk_init_queue_node(rfn, lock, -1); + return blk_init_queue_node(rfn, lock, NUMA_NO_NODE); } EXPORT_SYMBOL(blk_init_queue); @@ -595,13 +706,17 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, if (!q) return NULL; - if (blk_init_free_list(q)) + q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL); + if (!q->flush_rq) return NULL; + if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) + goto fail; + q->request_fn = rfn; q->prep_rq_fn = NULL; q->unprep_rq_fn = NULL; - q->queue_flags = QUEUE_FLAG_DEFAULT; + q->queue_flags |= QUEUE_FLAG_DEFAULT; /* Override internal queue lock with supplied lock pointer */ if (lock) @@ -614,21 +729,28 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, q->sg_reserved_size = INT_MAX; - /* - * all done - */ - if (!elevator_init(q, NULL)) { - blk_queue_congestion_threshold(q); - return q; + /* Protect q->elevator from elevator_change */ + mutex_lock(&q->sysfs_lock); + + /* init elevator */ + if (elevator_init(q, NULL)) { + mutex_unlock(&q->sysfs_lock); + goto fail; } + mutex_unlock(&q->sysfs_lock); + + return q; + +fail: + kfree(q->flush_rq); return NULL; } EXPORT_SYMBOL(blk_init_allocated_queue); bool blk_get_queue(struct request_queue *q) { - if (likely(!blk_queue_dead(q))) { + if (likely(!blk_queue_dying(q))) { __blk_get_queue(q); return true; } @@ -637,42 +759,15 @@ bool blk_get_queue(struct request_queue *q) } EXPORT_SYMBOL(blk_get_queue); -static inline void blk_free_request(struct request_queue *q, struct request *rq) +static inline void blk_free_request(struct request_list *rl, struct request *rq) { if (rq->cmd_flags & REQ_ELVPRIV) { - elv_put_request(q, rq); + elv_put_request(rl->q, rq); if (rq->elv.icq) put_io_context(rq->elv.icq->ioc); } - mempool_free(rq, q->rq.rq_pool); -} - -static struct request * -blk_alloc_request(struct request_queue *q, struct io_cq *icq, - unsigned int flags, gfp_t gfp_mask) -{ - struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); - - if (!rq) - return NULL; - - blk_rq_init(q, rq); - - rq->cmd_flags = flags | REQ_ALLOCED; - - if (flags & REQ_ELVPRIV) { - rq->elv.icq = icq; - if (unlikely(elv_set_request(q, rq, gfp_mask))) { - mempool_free(rq, q->rq.rq_pool); - return NULL; - } - /* @rq->elv.icq holds on to io_context until @rq is freed */ - if (icq) - get_io_context(icq->ioc); - } - - return rq; + mempool_free(rq, rl->rq_pool); } /* @@ -709,18 +804,23 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) ioc->last_waited = jiffies; } -static void __freed_request(struct request_queue *q, int sync) +static void __freed_request(struct request_list *rl, int sync) { - struct request_list *rl = &q->rq; + struct request_queue *q = rl->q; - if (rl->count[sync] < queue_congestion_off_threshold(q)) + /* + * bdi isn't aware of blkcg yet. As all async IOs end up root + * blkcg anyway, just use root blkcg state. 
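/*
 * Illustrative sketch, not part of this commit: a minimal request_fn-style
 * driver setting up its queue with the helpers touched above (the node id now
 * defaults to NUMA_NO_NODE).  my_request_fn, my_lock and the trivial
 * "complete everything immediately" body are assumptions of the example.
 */
#include <linux/blkdev.h>

static DEFINE_SPINLOCK(my_lock);

static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL)
		__blk_end_request_all(rq, 0);	/* pretend the I/O completed fine */
}

static struct request_queue *my_setup_queue(void)
{
	return blk_init_queue(my_request_fn, &my_lock);	/* NULL on failure */
}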
+ */ + if (rl == &q->root_rl && + rl->count[sync] < queue_congestion_off_threshold(q)) blk_clear_queue_congested(q, sync); if (rl->count[sync] + 1 <= q->nr_requests) { if (waitqueue_active(&rl->wait[sync])) wake_up(&rl->wait[sync]); - blk_clear_queue_full(q, sync); + blk_clear_rl_full(rl, sync); } } @@ -728,19 +828,61 @@ static void __freed_request(struct request_queue *q, int sync) * A request has just been released. Account for it, update the full and * congestion status, wake up any waiters. Called under q->queue_lock. */ -static void freed_request(struct request_queue *q, unsigned int flags) +static void freed_request(struct request_list *rl, unsigned int flags) { - struct request_list *rl = &q->rq; + struct request_queue *q = rl->q; int sync = rw_is_sync(flags); + q->nr_rqs[sync]--; rl->count[sync]--; if (flags & REQ_ELVPRIV) - rl->elvpriv--; + q->nr_rqs_elvpriv--; - __freed_request(q, sync); + __freed_request(rl, sync); if (unlikely(rl->starved[sync ^ 1])) - __freed_request(q, sync ^ 1); + __freed_request(rl, sync ^ 1); +} + +int blk_update_nr_requests(struct request_queue *q, unsigned int nr) +{ + struct request_list *rl; + + spin_lock_irq(q->queue_lock); + q->nr_requests = nr; + blk_queue_congestion_threshold(q); + + /* congestion isn't cgroup aware and follows root blkcg for now */ + rl = &q->root_rl; + + if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) + blk_set_queue_congested(q, BLK_RW_SYNC); + else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) + blk_clear_queue_congested(q, BLK_RW_SYNC); + + if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q)) + blk_set_queue_congested(q, BLK_RW_ASYNC); + else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) + blk_clear_queue_congested(q, BLK_RW_ASYNC); + + blk_queue_for_each_rl(rl, q) { + if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_SYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_SYNC); + wake_up(&rl->wait[BLK_RW_SYNC]); + } + + if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_ASYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_ASYNC); + wake_up(&rl->wait[BLK_RW_ASYNC]); + } + } + + spin_unlock_irq(q->queue_lock); + return 0; } /* @@ -763,8 +905,24 @@ static bool blk_rq_should_init_elevator(struct bio *bio) } /** - * get_request - get a free request - * @q: request_queue to allocate request from + * rq_ioc - determine io_context for request allocation + * @bio: request being allocated is for this bio (can be %NULL) + * + * Determine io_context to use for request allocation for @bio. May return + * %NULL if %current->io_context doesn't exist. + */ +static struct io_context *rq_ioc(struct bio *bio) +{ +#ifdef CONFIG_BLK_CGROUP + if (bio && bio->bi_ioc) + return bio->bi_ioc; +#endif + return current->io_context; +} + +/** + * __get_request - get a free request + * @rl: request list to allocate from * @rw_flags: RW and SYNC flags * @bio: bio to allocate request for (can be %NULL) * @gfp_mask: allocation mask @@ -776,22 +934,18 @@ static bool blk_rq_should_init_elevator(struct bio *bio) * Returns %NULL on failure, with @q->queue_lock held. * Returns !%NULL on success, with @q->queue_lock *not held*. 
*/ -static struct request *get_request(struct request_queue *q, int rw_flags, - struct bio *bio, gfp_t gfp_mask) +static struct request *__get_request(struct request_list *rl, int rw_flags, + struct bio *bio, gfp_t gfp_mask) { - struct request *rq = NULL; - struct request_list *rl = &q->rq; - struct elevator_type *et; - struct io_context *ioc; + struct request_queue *q = rl->q; + struct request *rq; + struct elevator_type *et = q->elevator->type; + struct io_context *ioc = rq_ioc(bio); struct io_cq *icq = NULL; const bool is_sync = rw_is_sync(rw_flags) != 0; - bool retried = false; int may_queue; -retry: - et = q->elevator->type; - ioc = current->io_context; - if (unlikely(blk_queue_dead(q))) + if (unlikely(blk_queue_dying(q))) return NULL; may_queue = elv_may_queue(q, rw_flags); @@ -801,28 +955,14 @@ retry: if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { if (rl->count[is_sync]+1 >= q->nr_requests) { /* - * We want ioc to record batching state. If it's - * not already there, creating a new one requires - * dropping queue_lock, which in turn requires - * retesting conditions to avoid queue hang. - */ - if (!ioc && !retried) { - spin_unlock_irq(q->queue_lock); - create_io_context(current, gfp_mask, q->node); - spin_lock_irq(q->queue_lock); - retried = true; - goto retry; - } - - /* * The queue will fill after this allocation, so set * it as full, and mark this process as "batching". * This process will be allowed to complete a batch of * requests, others will be blocked. */ - if (!blk_queue_full(q, is_sync)) { + if (!blk_rl_full(rl, is_sync)) { ioc_set_batching(q, ioc); - blk_set_queue_full(q, is_sync); + blk_set_rl_full(rl, is_sync); } else { if (may_queue != ELV_MQUEUE_MUST && !ioc_batching(q, ioc)) { @@ -831,11 +971,16 @@ retry: * process is not a "batcher", and not * exempted by the IO scheduler */ - goto out; + return NULL; } } } - blk_set_queue_congested(q, is_sync); + /* + * bdi isn't aware of blkcg yet. As all async IOs end up + * root blkcg anyway, just use root blkcg state. + */ + if (rl == &q->root_rl) + blk_set_queue_congested(q, is_sync); } /* @@ -844,8 +989,9 @@ retry: * allocated with any setting of ->nr_requests */ if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) - goto out; + return NULL; + q->nr_rqs[is_sync]++; rl->count[is_sync]++; rl->starved[is_sync] = 0; @@ -859,10 +1005,9 @@ retry: * Also, lookup icq while holding queue_lock. If it doesn't exist, * it will be created after releasing queue_lock. */ - if (blk_rq_should_init_elevator(bio) && - !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) { + if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { rw_flags |= REQ_ELVPRIV; - rl->elvpriv++; + q->nr_rqs_elvpriv++; if (et->icq_cache && ioc) icq = ioc_lookup_icq(ioc, q); } @@ -871,41 +1016,33 @@ retry: rw_flags |= REQ_IO_STAT; spin_unlock_irq(q->queue_lock); - /* create icq if missing */ - if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) { - icq = ioc_create_icq(q, gfp_mask); - if (!icq) - goto fail_icq; - } - - rq = blk_alloc_request(q, icq, rw_flags, gfp_mask); + /* allocate and init request */ + rq = mempool_alloc(rl->rq_pool, gfp_mask); + if (!rq) + goto fail_alloc; -fail_icq: - if (unlikely(!rq)) { - /* - * Allocation failed presumably due to memory. Undo anything - * we might have messed up. - * - * Allocating task should really be put onto the front of the - * wait queue, but this is pretty rare. 
- */ - spin_lock_irq(q->queue_lock); - freed_request(q, rw_flags); + blk_rq_init(q, rq); + blk_rq_set_rl(rq, rl); + rq->cmd_flags = rw_flags | REQ_ALLOCED; + + /* init elvpriv */ + if (rw_flags & REQ_ELVPRIV) { + if (unlikely(et->icq_cache && !icq)) { + if (ioc) + icq = ioc_create_icq(ioc, q, gfp_mask); + if (!icq) + goto fail_elvpriv; + } - /* - * in the very unlikely event that allocation failed and no - * requests for this direction was pending, mark us starved - * so that freeing of a request in the other direction will - * notice us. another possible fix would be to split the - * rq mempool into READ and WRITE - */ -rq_starved: - if (unlikely(rl->count[is_sync] == 0)) - rl->starved[is_sync] = 1; + rq->elv.icq = icq; + if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) + goto fail_elvpriv; - goto out; + /* @rq->elv.icq holds io_context until @rq is freed */ + if (icq) + get_io_context(icq->ioc); } - +out: /* * ioc may be NULL here, and ioc_batching will be false. That's * OK, if the queue is under the request limit then requests need @@ -916,80 +1053,131 @@ rq_starved: ioc->nr_batch_requests--; trace_block_getrq(q, bio, rw_flags & 1); -out: return rq; + +fail_elvpriv: + /* + * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed + * and may fail indefinitely under memory pressure and thus + * shouldn't stall IO. Treat this request as !elvpriv. This will + * disturb iosched and blkcg but weird is bettern than dead. + */ + printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n", + dev_name(q->backing_dev_info.dev)); + + rq->cmd_flags &= ~REQ_ELVPRIV; + rq->elv.icq = NULL; + + spin_lock_irq(q->queue_lock); + q->nr_rqs_elvpriv--; + spin_unlock_irq(q->queue_lock); + goto out; + +fail_alloc: + /* + * Allocation failed presumably due to memory. Undo anything we + * might have messed up. + * + * Allocating task should really be put onto the front of the wait + * queue, but this is pretty rare. + */ + spin_lock_irq(q->queue_lock); + freed_request(rl, rw_flags); + + /* + * in the very unlikely event that allocation failed and no + * requests for this direction was pending, mark us starved so that + * freeing of a request in the other direction will notice + * us. another possible fix would be to split the rq mempool into + * READ and WRITE + */ +rq_starved: + if (unlikely(rl->count[is_sync] == 0)) + rl->starved[is_sync] = 1; + return NULL; } /** - * get_request_wait - get a free request with retry + * get_request - get a free request * @q: request_queue to allocate request from * @rw_flags: RW and SYNC flags * @bio: bio to allocate request for (can be %NULL) + * @gfp_mask: allocation mask * - * Get a free request from @q. This function keeps retrying under memory - * pressure and fails iff @q is dead. + * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this + * function keeps retrying under memory pressure and fails iff @q is dead. * * Must be callled with @q->queue_lock held and, * Returns %NULL on failure, with @q->queue_lock held. * Returns !%NULL on success, with @q->queue_lock *not held*. 
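/*
 * Illustrative sketch, not part of this commit: the gfp semantics documented
 * above, as seen through the long-standing blk_get_request() wrapper.  With
 * __GFP_WAIT (part of GFP_KERNEL) the allocation retries under memory
 * pressure and only fails on a dying queue; without it, failure is immediate.
 * my_alloc_one() is an assumption of the example.
 */
#include <linux/blkdev.h>

static struct request *my_alloc_one(struct request_queue *q, bool can_sleep)
{
	struct request *rq;

	rq = blk_get_request(q, READ, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!rq)
		return NULL;	/* queue dying, or atomic allocation failed */
	/* ... fill in and issue rq, then blk_put_request() when done ... */
	return rq;
}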
*/ -static struct request *get_request_wait(struct request_queue *q, int rw_flags, - struct bio *bio) +static struct request *get_request(struct request_queue *q, int rw_flags, + struct bio *bio, gfp_t gfp_mask) { const bool is_sync = rw_is_sync(rw_flags) != 0; + DEFINE_WAIT(wait); + struct request_list *rl; struct request *rq; - rq = get_request(q, rw_flags, bio, GFP_NOIO); - while (!rq) { - DEFINE_WAIT(wait); - struct request_list *rl = &q->rq; - - if (unlikely(blk_queue_dead(q))) - return NULL; + rl = blk_get_rl(q, bio); /* transferred to @rq on success */ +retry: + rq = __get_request(rl, rw_flags, bio, gfp_mask); + if (rq) + return rq; - prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, - TASK_UNINTERRUPTIBLE); + if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) { + blk_put_rl(rl); + return NULL; + } - trace_block_sleeprq(q, bio, rw_flags & 1); + /* wait on @rl and retry */ + prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, + TASK_UNINTERRUPTIBLE); - spin_unlock_irq(q->queue_lock); - io_schedule(); + trace_block_sleeprq(q, bio, rw_flags & 1); - /* - * After sleeping, we become a "batching" process and - * will be able to allocate at least one request, and - * up to a big batch of them for a small period time. - * See ioc_batching, ioc_set_batching - */ - create_io_context(current, GFP_NOIO, q->node); - ioc_set_batching(q, current->io_context); + spin_unlock_irq(q->queue_lock); + io_schedule(); - spin_lock_irq(q->queue_lock); - finish_wait(&rl->wait[is_sync], &wait); + /* + * After sleeping, we become a "batching" process and will be able + * to allocate at least one request, and up to a big batch of them + * for a small period time. See ioc_batching, ioc_set_batching + */ + ioc_set_batching(q, current->io_context); - rq = get_request(q, rw_flags, bio, GFP_NOIO); - }; + spin_lock_irq(q->queue_lock); + finish_wait(&rl->wait[is_sync], &wait); - return rq; + goto retry; } -struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) +static struct request *blk_old_get_request(struct request_queue *q, int rw, + gfp_t gfp_mask) { struct request *rq; BUG_ON(rw != READ && rw != WRITE); + /* create ioc upfront */ + create_io_context(gfp_mask, q->node); + spin_lock_irq(q->queue_lock); - if (gfp_mask & __GFP_WAIT) - rq = get_request_wait(q, rw, NULL); - else - rq = get_request(q, rw, NULL, gfp_mask); + rq = get_request(q, rw, NULL, gfp_mask); if (!rq) spin_unlock_irq(q->queue_lock); /* q->queue_lock is unlocked at this point */ return rq; } + +struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) +{ + if (q->mq_ops) + return blk_mq_alloc_request(q, rw, gfp_mask, false); + else + return blk_old_get_request(q, rw, gfp_mask); +} EXPORT_SYMBOL(blk_get_request); /** @@ -1031,6 +1219,8 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio, if (unlikely(!rq)) return ERR_PTR(-ENOMEM); + blk_rq_set_block_pc(rq); + for_each_bio(bio) { struct bio *bounce_bio = bio; int ret; @@ -1048,6 +1238,22 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio, EXPORT_SYMBOL(blk_make_request); /** + * blk_rq_set_block_pc - initialize a requeest to type BLOCK_PC + * @rq: request to be initialized + * + */ +void blk_rq_set_block_pc(struct request *rq) +{ + rq->cmd_type = REQ_TYPE_BLOCK_PC; + rq->__data_len = 0; + rq->__sector = (sector_t) -1; + rq->bio = rq->biotail = NULL; + memset(rq->__cmd, 0, sizeof(rq->__cmd)); + rq->cmd = rq->__cmd; +} +EXPORT_SYMBOL(blk_rq_set_block_pc); + +/** * blk_requeue_request - put 
a request back on queue * @q: request queue where request should be inserted * @rq: request to be inserted @@ -1075,19 +1281,22 @@ EXPORT_SYMBOL(blk_requeue_request); static void add_acct_request(struct request_queue *q, struct request *rq, int where) { - drive_stat_acct(rq, 1); + blk_account_io_start(rq, true); __elv_add_request(q, rq, where); } static void part_round_stats_single(int cpu, struct hd_struct *part, unsigned long now) { + int inflight; + if (now == part->stamp) return; - if (part_in_flight(part)) { + inflight = part_in_flight(part); + if (inflight) { __part_stat_add(cpu, part, time_in_queue, - part_in_flight(part) * (now - part->stamp)); + inflight * (now - part->stamp)); __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); } part->stamp = now; @@ -1119,6 +1328,16 @@ void part_round_stats(int cpu, struct hd_struct *part) } EXPORT_SYMBOL_GPL(part_round_stats); +#ifdef CONFIG_PM_RUNTIME +static void blk_pm_put_request(struct request *rq) +{ + if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending) + pm_runtime_mark_last_busy(rq->q->dev); +} +#else +static inline void blk_pm_put_request(struct request *rq) {} +#endif + /* * queue lock must be held */ @@ -1126,8 +1345,13 @@ void __blk_put_request(struct request_queue *q, struct request *req) { if (unlikely(!q)) return; - if (unlikely(--req->ref_count)) + + if (q->mq_ops) { + blk_mq_free_request(req); return; + } + + blk_pm_put_request(req); elv_completed_request(q, req); @@ -1140,24 +1364,31 @@ void __blk_put_request(struct request_queue *q, struct request *req) */ if (req->cmd_flags & REQ_ALLOCED) { unsigned int flags = req->cmd_flags; + struct request_list *rl = blk_rq_rl(req); BUG_ON(!list_empty(&req->queuelist)); - BUG_ON(!hlist_unhashed(&req->hash)); + BUG_ON(ELV_ON_HASH(req)); - blk_free_request(q, req); - freed_request(q, flags); + blk_free_request(rl, req); + freed_request(rl, flags); + blk_put_rl(rl); } } EXPORT_SYMBOL_GPL(__blk_put_request); void blk_put_request(struct request *req) { - unsigned long flags; struct request_queue *q = req->q; - spin_lock_irqsave(q->queue_lock, flags); - __blk_put_request(q, req); - spin_unlock_irqrestore(q->queue_lock, flags); + if (q->mq_ops) + blk_mq_free_request(req); + else { + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + __blk_put_request(q, req); + spin_unlock_irqrestore(q->queue_lock, flags); + } } EXPORT_SYMBOL(blk_put_request); @@ -1183,47 +1414,46 @@ void blk_add_request_payload(struct request *rq, struct page *page, bio->bi_io_vec->bv_offset = 0; bio->bi_io_vec->bv_len = len; - bio->bi_size = len; + bio->bi_iter.bi_size = len; bio->bi_vcnt = 1; bio->bi_phys_segments = 1; rq->__data_len = rq->resid_len = len; rq->nr_phys_segments = 1; - rq->buffer = bio_data(bio); } EXPORT_SYMBOL_GPL(blk_add_request_payload); -static bool bio_attempt_back_merge(struct request_queue *q, struct request *req, - struct bio *bio) +bool bio_attempt_back_merge(struct request_queue *q, struct request *req, + struct bio *bio) { const int ff = bio->bi_rw & REQ_FAILFAST_MASK; if (!ll_back_merge_fn(q, req, bio)) return false; - trace_block_bio_backmerge(q, bio); + trace_block_bio_backmerge(q, req, bio); if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) blk_rq_set_mixed_merge(req); req->biotail->bi_next = bio; req->biotail = bio; - req->__data_len += bio->bi_size; + req->__data_len += bio->bi_iter.bi_size; req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); - drive_stat_acct(req, 0); + blk_account_io_start(req, false); return true; } -static bool 
bio_attempt_front_merge(struct request_queue *q, - struct request *req, struct bio *bio) +bool bio_attempt_front_merge(struct request_queue *q, struct request *req, + struct bio *bio) { const int ff = bio->bi_rw & REQ_FAILFAST_MASK; if (!ll_front_merge_fn(q, req, bio)) return false; - trace_block_bio_frontmerge(q, bio); + trace_block_bio_frontmerge(q, req, bio); if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) blk_rq_set_mixed_merge(req); @@ -1231,22 +1461,16 @@ static bool bio_attempt_front_merge(struct request_queue *q, bio->bi_next = req->bio; req->bio = bio; - /* - * may not be valid. if the low level driver said - * it didn't need a bounce buffer then it better - * not touch req->buffer either... - */ - req->buffer = bio_data(bio); - req->__sector = bio->bi_sector; - req->__data_len += bio->bi_size; + req->__sector = bio->bi_iter.bi_sector; + req->__data_len += bio->bi_iter.bi_size; req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); - drive_stat_acct(req, 0); + blk_account_io_start(req, false); return true; } /** - * attempt_plug_merge - try to merge with %current's plugged list + * blk_attempt_plug_merge - try to merge with %current's plugged list * @q: request_queue new bio is being queued at * @bio: new bio being queued * @request_count: out parameter for number of traversed plugged requests @@ -1261,23 +1485,32 @@ static bool bio_attempt_front_merge(struct request_queue *q, * added on the elevator at this point. In addition, we don't have * reliable access to the elevator outside queue lock. Only check basic * merging parameters without querying the elevator. + * + * Caller must ensure !blk_queue_nomerges(q) beforehand. */ -static bool attempt_plug_merge(struct request_queue *q, struct bio *bio, - unsigned int *request_count) +bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, + unsigned int *request_count) { struct blk_plug *plug; struct request *rq; bool ret = false; + struct list_head *plug_list; plug = current->plug; if (!plug) goto out; *request_count = 0; - list_for_each_entry_reverse(rq, &plug->list, queuelist) { + if (q->mq_ops) + plug_list = &plug->mq_list; + else + plug_list = &plug->list; + + list_for_each_entry_reverse(rq, plug_list, queuelist) { int el_ret; - (*request_count)++; + if (rq->q == q) + (*request_count)++; if (rq->q != q || !blk_rq_merge_ok(rq, bio)) continue; @@ -1306,7 +1539,7 @@ void init_request_from_bio(struct request *req, struct bio *bio) req->cmd_flags |= REQ_FAILFAST_MASK; req->errors = 0; - req->__sector = bio->bi_sector; + req->__sector = bio->bi_iter.bi_sector; req->ioprio = bio_prio(bio); blk_rq_bio_prep(req->q, req, bio); } @@ -1326,6 +1559,11 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio) */ blk_queue_bounce(q, &bio); + if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { + bio_endio(bio, -EIO); + return; + } + if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { spin_lock_irq(q->queue_lock); where = ELEVATOR_INSERT_FLUSH; @@ -1336,7 +1574,8 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio) * Check if we can merge with the plugged list before grabbing * any locks. */ - if (attempt_plug_merge(q, bio, &request_count)) + if (!blk_queue_nomerges(q) && + blk_attempt_plug_merge(q, bio, &request_count)) return; spin_lock_irq(q->queue_lock); @@ -1372,7 +1611,7 @@ get_rq: * Grab a free request. This is might sleep but can not fail. * Returns with the queue unlocked. 
*/ - req = get_request_wait(q, rw_flags, bio); + req = get_request(q, rw_flags, bio, GFP_NOIO); if (unlikely(!req)) { bio_endio(bio, -ENODEV); /* @q is dead */ goto out_unlock; @@ -1393,27 +1632,18 @@ get_rq: if (plug) { /* * If this is the first request added after a plug, fire - * of a plug trace. If others have been added before, check - * if we have multiple devices in this plug. If so, make a - * note to sort the list before dispatch. + * of a plug trace. */ - if (list_empty(&plug->list)) + if (!request_count) trace_block_plug(q); else { - if (!plug->should_sort) { - struct request *__rq; - - __rq = list_entry_rq(plug->list.prev); - if (__rq->q != q) - plug->should_sort = 1; - } if (request_count >= BLK_MAX_REQUEST_COUNT) { blk_flush_plug_list(plug, false); trace_block_plug(q); } } list_add_tail(&req->queuelist, &plug->list); - drive_stat_acct(req, 1); + blk_account_io_start(req, true); } else { spin_lock_irq(q->queue_lock); add_acct_request(q, req, where); @@ -1434,12 +1664,12 @@ static inline void blk_partition_remap(struct bio *bio) if (bio_sectors(bio) && bdev != bdev->bd_contains) { struct hd_struct *p = bdev->bd_part; - bio->bi_sector += p->start_sect; + bio->bi_iter.bi_sector += p->start_sect; bio->bi_bdev = bdev->bd_contains; trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, bdev->bd_dev, - bio->bi_sector - p->start_sect); + bio->bi_iter.bi_sector - p->start_sect); } } @@ -1451,7 +1681,7 @@ static void handle_bad_sector(struct bio *bio) printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", bdevname(bio->bi_bdev, b), bio->bi_rw, - (unsigned long long)bio->bi_sector + bio_sectors(bio), + (unsigned long long)bio_end_sector(bio), (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); set_bit(BIO_EOF, &bio->bi_flags); @@ -1477,7 +1707,7 @@ static int __init fail_make_request_debugfs(void) struct dentry *dir = fault_create_debugfs_attr("fail_make_request", NULL, &fail_make_request); - return IS_ERR(dir) ? PTR_ERR(dir) : 0; + return PTR_ERR_OR_ZERO(dir); } late_initcall(fail_make_request_debugfs); @@ -1505,7 +1735,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) /* Test device or partition size, when known. 
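/*
 * Illustrative sketch, not part of this commit: a submitter batching bios
 * under a plug so they are merged and counted by the plug handling above
 * (and flushed early once BLK_MAX_REQUEST_COUNT is reached).
 * my_submit_batch() is an assumption of the example.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void my_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* requests queue up on current->plug */
	for (i = 0; i < nr; i++)
		submit_bio(WRITE, bios[i]);
	blk_finish_plug(&plug);		/* flush everything to the device queues */
}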
*/ maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; if (maxsector) { - sector_t sector = bio->bi_sector; + sector_t sector = bio->bi_iter.bi_sector; if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { /* @@ -1541,12 +1771,12 @@ generic_make_request_checks(struct bio *bio) "generic_make_request: Trying to access " "nonexistent block-device %s (%Lu)\n", bdevname(bio->bi_bdev, b), - (long long) bio->bi_sector); + (long long) bio->bi_iter.bi_sector); goto end_io; } - if (unlikely(!(bio->bi_rw & REQ_DISCARD) && - nr_sectors > queue_max_hw_sectors(q))) { + if (likely(bio_is_rw(bio) && + nr_sectors > queue_max_hw_sectors(q))) { printk(KERN_ERR "bio too big device %s (%u > %u)\n", bdevname(bio->bi_bdev, b), bio_sectors(bio), @@ -1555,9 +1785,9 @@ generic_make_request_checks(struct bio *bio) } part = bio->bi_bdev->bd_part; - if (should_fail_request(part, bio->bi_size) || + if (should_fail_request(part, bio->bi_iter.bi_size) || should_fail_request(&part_to_disk(part)->part0, - bio->bi_size)) + bio->bi_iter.bi_size)) goto end_io; /* @@ -1566,9 +1796,6 @@ generic_make_request_checks(struct bio *bio) */ blk_partition_remap(bio); - if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) - goto end_io; - if (bio_check_eod(bio, nr_sectors)) goto end_io; @@ -1587,12 +1814,24 @@ generic_make_request_checks(struct bio *bio) if ((bio->bi_rw & REQ_DISCARD) && (!blk_queue_discard(q) || - ((bio->bi_rw & REQ_SECURE) && - !blk_queue_secdiscard(q)))) { + ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) { + err = -EOPNOTSUPP; + goto end_io; + } + + if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) { err = -EOPNOTSUPP; goto end_io; } + /* + * Various block parts want %current->io_context and lazy ioc + * allocation ends up trading a lot of pain for a small amount of + * memory. Just allocate it upfront. This may fail and block + * layer knows how to live with it. + */ + create_io_context(GFP_ATOMIC, q->node); + if (blk_throtl_bio(q, bio)) return false; /* throttled, will be resubmitted later */ @@ -1690,19 +1929,24 @@ EXPORT_SYMBOL(generic_make_request); */ void submit_bio(int rw, struct bio *bio) { - int count = bio_sectors(bio); - bio->bi_rw |= rw; /* * If it's a regular read/write or a barrier with data attached, * go through the normal accounting stuff before submission. */ - if (bio_has_data(bio) && !(rw & REQ_DISCARD)) { + if (bio_has_data(bio)) { + unsigned int count; + + if (unlikely(rw & REQ_WRITE_SAME)) + count = bdev_logical_block_size(bio->bi_bdev) >> 9; + else + count = bio_sectors(bio); + if (rw & WRITE) { count_vm_events(PGPGOUT, count); } else { - task_io_account_read(bio->bi_size); + task_io_account_read(bio->bi_iter.bi_size); count_vm_events(PGPGIN, count); } @@ -1711,7 +1955,7 @@ void submit_bio(int rw, struct bio *bio) printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", current->comm, task_pid_nr(current), (rw & WRITE) ? "WRITE" : "READ", - (unsigned long long)bio->bi_sector, + (unsigned long long)bio->bi_iter.bi_sector, bdevname(bio->bi_bdev, b), count); } @@ -1737,18 +1981,17 @@ EXPORT_SYMBOL(submit_bio); * in some cases below, so export this function. * Request stacking drivers like request-based dm may change the queue * limits while requests are in the queue (e.g. dm's table swapping). 
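/*
 * Illustrative sketch, not part of this commit: building a bio with the
 * bi_iter fields this patch converts to, and pushing it through submit_bio()
 * and the generic_make_request_checks() path above.  The bdev, page and
 * sector are assumptions of the example, and a real submitter would also set
 * bi_end_io/bi_private before submission.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void my_write_page(struct block_device *bdev, struct page *page,
			  sector_t sector)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	if (!bio)
		return;
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;	/* used to be bio->bi_sector */
	bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(WRITE, bio);			/* accounting + checks happen here */
}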
- * Such request stacking drivers should check those requests agaist + * Such request stacking drivers should check those requests against * the new queue limits again when they dispatch those requests, * although such checkings are also done against the old queue limits * when submitting requests. */ int blk_rq_check_limits(struct request_queue *q, struct request *rq) { - if (rq->cmd_flags & REQ_DISCARD) + if (!rq_mergeable(rq)) return 0; - if (blk_rq_sectors(rq) > queue_max_sectors(q) || - blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) { + if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { printk(KERN_ERR "%s: over max size limit.\n", __func__); return -EIO; } @@ -1787,7 +2030,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq) return -EIO; spin_lock_irqsave(q->queue_lock, flags); - if (unlikely(blk_queue_dead(q))) { + if (unlikely(blk_queue_dying(q))) { spin_unlock_irqrestore(q->queue_lock, flags); return -ENODEV; } @@ -1845,7 +2088,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq) for (bio = rq->bio; bio; bio = bio->bi_next) { if ((bio->bi_rw & ff) != ff) break; - bytes += bio->bi_size; + bytes += bio->bi_iter.bi_size; } /* this could lead to infinite loop */ @@ -1854,7 +2097,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq) } EXPORT_SYMBOL_GPL(blk_rq_err_bytes); -static void blk_account_io_completion(struct request *req, unsigned int bytes) +void blk_account_io_completion(struct request *req, unsigned int bytes) { if (blk_do_io_stat(req)) { const int rw = rq_data_dir(req); @@ -1868,7 +2111,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes) } } -static void blk_account_io_done(struct request *req) +void blk_account_io_done(struct request *req) { /* * Account IO completion. flush_rq isn't accounted as a @@ -1894,6 +2137,64 @@ static void blk_account_io_done(struct request *req) } } +#ifdef CONFIG_PM_RUNTIME +/* + * Don't process normal requests when queue is suspended + * or in the process of suspending/resuming + */ +static struct request *blk_pm_peek_request(struct request_queue *q, + struct request *rq) +{ + if (q->dev && (q->rpm_status == RPM_SUSPENDED || + (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM)))) + return NULL; + else + return rq; +} +#else +static inline struct request *blk_pm_peek_request(struct request_queue *q, + struct request *rq) +{ + return rq; +} +#endif + +void blk_account_io_start(struct request *rq, bool new_io) +{ + struct hd_struct *part; + int rw = rq_data_dir(rq); + int cpu; + + if (!blk_do_io_stat(rq)) + return; + + cpu = part_stat_lock(); + + if (!new_io) { + part = rq->part; + part_stat_inc(cpu, part, merges[rw]); + } else { + part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); + if (!hd_struct_try_get(part)) { + /* + * The partition is already being removed, + * the request will be accounted on the disk only + * + * We take a reference on disk->part0 although that + * partition will never be deleted, so we can treat + * it as any other partition. 
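/*
 * Illustrative sketch, not part of this commit: how a request-stacking driver
 * (request-based dm is the in-tree user) feeds an already prepared clone to a
 * lower queue through blk_insert_cloned_request() above, which applies the
 * limit check before queueing.  my_dispatch_clone() is an assumption of the
 * example.
 */
#include <linux/blkdev.h>

static int my_dispatch_clone(struct request_queue *lower_q,
			     struct request *clone)
{
	int ret;

	ret = blk_insert_cloned_request(lower_q, clone);
	if (ret)		/* -EIO when over limits, -ENODEV if dying */
		pr_err("clone dispatch failed: %d\n", ret);
	return ret;
}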
+ */ + part = &rq->rq_disk->part0; + hd_struct_get(part); + } + part_round_stats(cpu, part); + part_inc_in_flight(part, rw); + rq->part = part; + } + + part_stat_unlock(); +} + /** * blk_peek_request - peek at the top of a request queue * @q: request queue to peek at @@ -1916,6 +2217,11 @@ struct request *blk_peek_request(struct request_queue *q) int ret; while ((rq = __elv_next_request(q)) != NULL) { + + rq = blk_pm_peek_request(q, rq); + if (!rq) + break; + if (!(rq->cmd_flags & REQ_STARTED)) { /* * This is the first time the device driver @@ -2040,6 +2346,7 @@ void blk_start_request(struct request *req) if (unlikely(blk_bidi_rq(req))) req->next_rq->resid_len = blk_rq_bytes(req->next_rq); + BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags)); blk_add_timer(req); } EXPORT_SYMBOL(blk_start_request); @@ -2094,13 +2401,12 @@ EXPORT_SYMBOL(blk_fetch_request); **/ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) { - int total_bytes, bio_nbytes, next_idx = 0; - struct bio *bio; + int total_bytes; if (!req->bio) return false; - trace_block_rq_complete(req->q, req); + trace_block_rq_complete(req->q, req, nr_bytes); /* * For fs requests, rq is just carrier of independent bio's @@ -2127,68 +2433,44 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) case -EBADE: error_type = "critical nexus"; break; + case -ETIMEDOUT: + error_type = "timeout"; + break; + case -ENOSPC: + error_type = "critical space allocation"; + break; + case -ENODATA: + error_type = "critical medium"; + break; case -EIO: default: error_type = "I/O"; break; } - printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n", - error_type, req->rq_disk ? req->rq_disk->disk_name : "?", - (unsigned long long)blk_rq_pos(req)); + printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector %llu\n", + error_type, req->rq_disk ? 
+ req->rq_disk->disk_name : "?", + (unsigned long long)blk_rq_pos(req)); + } blk_account_io_completion(req, nr_bytes); - total_bytes = bio_nbytes = 0; - while ((bio = req->bio) != NULL) { - int nbytes; + total_bytes = 0; + while (req->bio) { + struct bio *bio = req->bio; + unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); - if (nr_bytes >= bio->bi_size) { + if (bio_bytes == bio->bi_iter.bi_size) req->bio = bio->bi_next; - nbytes = bio->bi_size; - req_bio_endio(req, bio, nbytes, error); - next_idx = 0; - bio_nbytes = 0; - } else { - int idx = bio->bi_idx + next_idx; - - if (unlikely(idx >= bio->bi_vcnt)) { - blk_dump_rq_flags(req, "__end_that"); - printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", - __func__, idx, bio->bi_vcnt); - break; - } - - nbytes = bio_iovec_idx(bio, idx)->bv_len; - BIO_BUG_ON(nbytes > bio->bi_size); - /* - * not a complete bvec done - */ - if (unlikely(nbytes > nr_bytes)) { - bio_nbytes += nr_bytes; - total_bytes += nr_bytes; - break; - } - - /* - * advance to the next vector - */ - next_idx++; - bio_nbytes += nbytes; - } + req_bio_endio(req, bio, bio_bytes, error); - total_bytes += nbytes; - nr_bytes -= nbytes; + total_bytes += bio_bytes; + nr_bytes -= bio_bytes; - bio = req->bio; - if (bio) { - /* - * end more in this run, or just return 'not-done' - */ - if (unlikely(nr_bytes <= 0)) - break; - } + if (!nr_bytes) + break; } /* @@ -2204,21 +2486,10 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) return false; } - /* - * if the request wasn't completed, update state - */ - if (bio_nbytes) { - req_bio_endio(req, bio, bio_nbytes, error); - bio->bi_idx += next_idx; - bio_iovec(bio)->bv_offset += nr_bytes; - bio_iovec(bio)->bv_len -= nr_bytes; - } - req->__data_len -= total_bytes; - req->buffer = bio_data(req->bio); /* update sector only for requests with clear definition of sector */ - if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD)) + if (req->cmd_type == REQ_TYPE_FS) req->__sector += total_bytes >> 9; /* mixed attributes always follow the first bio */ @@ -2284,7 +2555,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request); /* * queue lock must be held */ -static void blk_finish_request(struct request *req, int error) +void blk_finish_request(struct request *req, int error) { if (blk_rq_tagged(req)) blk_queue_end_tag(req->q, req); @@ -2299,7 +2570,6 @@ static void blk_finish_request(struct request *req, int error) if (req->cmd_flags & REQ_DONTPREP) blk_unprep_request(req); - blk_account_io_done(req); if (req->end_io) @@ -2311,6 +2581,7 @@ static void blk_finish_request(struct request *req, int error) __blk_put_request(req->q, req); } } +EXPORT_SYMBOL(blk_finish_request); /** * blk_end_bidi_request - Complete a bidi request @@ -2534,11 +2805,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ rq->cmd_flags |= bio->bi_rw & REQ_WRITE; - if (bio_has_data(bio)) { + if (bio_has_data(bio)) rq->nr_phys_segments = bio_phys_segments(q, bio); - rq->buffer = bio_data(bio); - } - rq->__data_len = bio->bi_size; + + rq->__data_len = bio->bi_iter.bi_size; rq->bio = rq->biotail = bio; if (bio->bi_bdev) @@ -2556,10 +2826,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, void rq_flush_dcache_pages(struct request *rq) { struct req_iterator iter; - struct bio_vec *bvec; + struct bio_vec bvec; rq_for_each_segment(bvec, rq, iter) - flush_dcache_page(bvec->bv_page); + flush_dcache_page(bvec.bv_page); } EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); 
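/*
 * Illustrative sketch, not part of this commit: a driver completing a request
 * in chunks; every call below ends up in the rewritten blk_update_request()
 * loop above, which now walks bios via bi_iter instead of per-bvec indexes.
 * my_complete_chunk() and 'done' are assumptions of the example.
 */
#include <linux/blkdev.h>

static void my_complete_chunk(struct request *rq, unsigned int done, int error)
{
	if (blk_end_request(rq, error, done))
		return;		/* bytes still pending, request stays active */
	/* request fully finished; the block layer has already freed it */
}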
#endif @@ -2613,7 +2883,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); /* * Copy attributes of the original request to the clone request. - * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied. + * The actual data parts (e.g. ->cmd, ->sense) are not copied. */ static void __blk_rq_prep_clone(struct request *dst, struct request *src) { @@ -2639,7 +2909,7 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src) * * Description: * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. - * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense) + * The actual data parts of @rq_src (e.g. ->cmd, ->sense) * are not copied, and copying such parts is the caller's responsibility. * Also, pages which the original bios are pointing to are not copied * and the cloned bios just point same pages. @@ -2659,16 +2929,10 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src, blk_rq_init(NULL, rq); __rq_for_each_bio(bio_src, rq_src) { - bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs); + bio = bio_clone_bioset(bio_src, gfp_mask, bs); if (!bio) goto free_and_out; - __bio_clone(bio, bio_src); - - if (bio_integrity(bio_src) && - bio_integrity_clone(bio, bio_src, gfp_mask, bs)) - goto free_and_out; - if (bio_ctr && bio_ctr(bio, bio_src, data)) goto free_and_out; @@ -2685,27 +2949,32 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src, free_and_out: if (bio) - bio_free(bio, bs); + bio_put(bio); blk_rq_unprep_clone(rq); return -ENOMEM; } EXPORT_SYMBOL_GPL(blk_rq_prep_clone); -int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) +int kblockd_schedule_work(struct work_struct *work) { return queue_work(kblockd_workqueue, work); } EXPORT_SYMBOL(kblockd_schedule_work); -int kblockd_schedule_delayed_work(struct request_queue *q, - struct delayed_work *dwork, unsigned long delay) +int kblockd_schedule_delayed_work(struct delayed_work *dwork, + unsigned long delay) { return queue_delayed_work(kblockd_workqueue, dwork, delay); } EXPORT_SYMBOL(kblockd_schedule_delayed_work); -#define PLUG_MAGIC 0x91827364 +int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, + unsigned long delay) +{ + return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay); +} +EXPORT_SYMBOL(kblockd_schedule_delayed_work_on); /** * blk_start_plug - initialize blk_plug and track it inside the task_struct @@ -2725,10 +2994,9 @@ void blk_start_plug(struct blk_plug *plug) { struct task_struct *tsk = current; - plug->magic = PLUG_MAGIC; INIT_LIST_HEAD(&plug->list); + INIT_LIST_HEAD(&plug->mq_list); INIT_LIST_HEAD(&plug->cb_list); - plug->should_sort = 0; /* * If this is a nested plug, don't actually assign it. It will be @@ -2749,7 +3017,8 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) struct request *rqa = container_of(a, struct request, queuelist); struct request *rqb = container_of(b, struct request, queuelist); - return !(rqa->q <= rqb->q); + return !(rqa->q < rqb->q || + (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb))); } /* @@ -2764,47 +3033,55 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth, { trace_block_unplug(q, depth, !from_schedule); - /* - * Don't mess with dead queue. - */ - if (unlikely(blk_queue_dead(q))) { - spin_unlock(q->queue_lock); - return; - } - - /* - * If we are punting this to kblockd, then we can safely drop - * the queue_lock before waking kblockd (which needs to take - * this lock). 
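/*
 * Illustrative sketch, not part of this commit: kblockd_schedule_work() lost
 * its request_queue argument above, so a caller now passes only the work
 * item.  my_work_fn, my_work and my_defer are assumptions of the example.
 */
#include <linux/blkdev.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* deferred block-layer work runs here on the kblockd workqueue */
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_defer(void)
{
	kblockd_schedule_work(&my_work);
}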
- */ - if (from_schedule) { - spin_unlock(q->queue_lock); + if (from_schedule) blk_run_queue_async(q); - } else { + else __blk_run_queue(q); - spin_unlock(q->queue_lock); - } - + spin_unlock(q->queue_lock); } -static void flush_plug_callbacks(struct blk_plug *plug) +static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) { LIST_HEAD(callbacks); - if (list_empty(&plug->cb_list)) - return; - - list_splice_init(&plug->cb_list, &callbacks); + while (!list_empty(&plug->cb_list)) { + list_splice_init(&plug->cb_list, &callbacks); - while (!list_empty(&callbacks)) { - struct blk_plug_cb *cb = list_first_entry(&callbacks, + while (!list_empty(&callbacks)) { + struct blk_plug_cb *cb = list_first_entry(&callbacks, struct blk_plug_cb, list); - list_del(&cb->list); - cb->callback(cb); + list_del(&cb->list); + cb->callback(cb, from_schedule); + } } } +struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, + int size) +{ + struct blk_plug *plug = current->plug; + struct blk_plug_cb *cb; + + if (!plug) + return NULL; + + list_for_each_entry(cb, &plug->cb_list, list) + if (cb->callback == unplug && cb->data == data) + return cb; + + /* Not currently on the callback list */ + BUG_ON(size < sizeof(*cb)); + cb = kzalloc(size, GFP_ATOMIC); + if (cb) { + cb->data = data; + cb->callback = unplug; + list_add(&cb->list, &plug->cb_list); + } + return cb; +} +EXPORT_SYMBOL(blk_check_plugged); + void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) { struct request_queue *q; @@ -2813,18 +3090,17 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) LIST_HEAD(list); unsigned int depth; - BUG_ON(plug->magic != PLUG_MAGIC); + flush_plug_callbacks(plug, from_schedule); + + if (!list_empty(&plug->mq_list)) + blk_mq_flush_plug_list(plug, from_schedule); - flush_plug_callbacks(plug); if (list_empty(&plug->list)) return; list_splice_init(&plug->list, &list); - if (plug->should_sort) { - list_sort(NULL, &list, plug_rq_cmp); - plug->should_sort = 0; - } + list_sort(NULL, &list, plug_rq_cmp); q = NULL; depth = 0; @@ -2852,7 +3128,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) /* * Short-circuit if @q is dead */ - if (unlikely(blk_queue_dead(q))) { + if (unlikely(blk_queue_dying(q))) { __blk_end_request_all(rq, -ENODEV); continue; } @@ -2886,6 +3162,149 @@ void blk_finish_plug(struct blk_plug *plug) } EXPORT_SYMBOL(blk_finish_plug); +#ifdef CONFIG_PM_RUNTIME +/** + * blk_pm_runtime_init - Block layer runtime PM initialization routine + * @q: the queue of the device + * @dev: the device the queue belongs to + * + * Description: + * Initialize runtime-PM-related fields for @q and start auto suspend for + * @dev. Drivers that want to take advantage of request-based runtime PM + * should call this function after @dev has been initialized, and its + * request queue @q has been allocated, and runtime PM for it can not happen + * yet(either due to disabled/forbidden or its usage_count > 0). In most + * cases, driver should call this function before any I/O has taken place. + * + * This function takes care of setting up using auto suspend for the device, + * the autosuspend delay is set to -1 to make runtime suspend impossible + * until an updated value is either set by user or by driver. Drivers do + * not need to touch other autosuspend settings. + * + * The block layer runtime PM is request based, so only works for drivers + * that use request as their IO unit instead of those directly use bio's. 
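/*
 * Editor's sketch, not part of the patch: blk_check_plugged() above lets
 * a stacking driver attach a per-task callback that runs when the plug is
 * flushed.  Since blk_check_plugged() kzalloc()s the callback structure
 * and flush_plug_callbacks() only unlinks it, the unplug callback is the
 * natural place to free it.  All "my_*" names are hypothetical; the
 * blk_plug_cb must sit at the start of the driver's structure because the
 * allocation is addressed through it.
 */
#include <linux/blkdev.h>
#include <linux/slab.h>

struct my_dev;					/* hypothetical driver state */

struct my_plug_cb {
	struct blk_plug_cb cb;			/* must be first */
	unsigned int batched;			/* example per-plug state */
};

static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);

	/* issue whatever was batched while the task held the plug */
	pr_debug("flushing %u batched items (from_schedule=%d)\n",
		 mcb->batched, from_schedule);
	kfree(mcb);
}

static bool my_try_to_batch(struct my_dev *dev)
{
	struct blk_plug_cb *cb;

	cb = blk_check_plugged(my_unplug, dev, sizeof(struct my_plug_cb));
	if (!cb)
		return false;	/* no plug active: caller submits directly */

	container_of(cb, struct my_plug_cb, cb)->batched++;
	return true;		/* my_unplug() runs at unplug time */
}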
+ */ +void blk_pm_runtime_init(struct request_queue *q, struct device *dev) +{ + q->dev = dev; + q->rpm_status = RPM_ACTIVE; + pm_runtime_set_autosuspend_delay(q->dev, -1); + pm_runtime_use_autosuspend(q->dev); +} +EXPORT_SYMBOL(blk_pm_runtime_init); + +/** + * blk_pre_runtime_suspend - Pre runtime suspend check + * @q: the queue of the device + * + * Description: + * This function will check if runtime suspend is allowed for the device + * by examining if there are any requests pending in the queue. If there + * are requests pending, the device can not be runtime suspended; otherwise, + * the queue's status will be updated to SUSPENDING and the driver can + * proceed to suspend the device. + * + * For the not allowed case, we mark last busy for the device so that + * runtime PM core will try to autosuspend it some time later. + * + * This function should be called near the start of the device's + * runtime_suspend callback. + * + * Return: + * 0 - OK to runtime suspend the device + * -EBUSY - Device should not be runtime suspended + */ +int blk_pre_runtime_suspend(struct request_queue *q) +{ + int ret = 0; + + spin_lock_irq(q->queue_lock); + if (q->nr_pending) { + ret = -EBUSY; + pm_runtime_mark_last_busy(q->dev); + } else { + q->rpm_status = RPM_SUSPENDING; + } + spin_unlock_irq(q->queue_lock); + return ret; +} +EXPORT_SYMBOL(blk_pre_runtime_suspend); + +/** + * blk_post_runtime_suspend - Post runtime suspend processing + * @q: the queue of the device + * @err: return value of the device's runtime_suspend function + * + * Description: + * Update the queue's runtime status according to the return value of the + * device's runtime suspend function and mark last busy for the device so + * that PM core will try to auto suspend the device at a later time. + * + * This function should be called near the end of the device's + * runtime_suspend callback. + */ +void blk_post_runtime_suspend(struct request_queue *q, int err) +{ + spin_lock_irq(q->queue_lock); + if (!err) { + q->rpm_status = RPM_SUSPENDED; + } else { + q->rpm_status = RPM_ACTIVE; + pm_runtime_mark_last_busy(q->dev); + } + spin_unlock_irq(q->queue_lock); +} +EXPORT_SYMBOL(blk_post_runtime_suspend); + +/** + * blk_pre_runtime_resume - Pre runtime resume processing + * @q: the queue of the device + * + * Description: + * Update the queue's runtime status to RESUMING in preparation for the + * runtime resume of the device. + * + * This function should be called near the start of the device's + * runtime_resume callback. + */ +void blk_pre_runtime_resume(struct request_queue *q) +{ + spin_lock_irq(q->queue_lock); + q->rpm_status = RPM_RESUMING; + spin_unlock_irq(q->queue_lock); +} +EXPORT_SYMBOL(blk_pre_runtime_resume); + +/** + * blk_post_runtime_resume - Post runtime resume processing + * @q: the queue of the device + * @err: return value of the device's runtime_resume function + * + * Description: + * Update the queue's runtime status according to the return value of the + * device's runtime_resume function. If it is successfully resumed, process + * the requests that are queued into the device's queue when it is resuming + * and then mark last busy and initiate autosuspend for it. + * + * This function should be called near the end of the device's + * runtime_resume callback. 
+ */
+void blk_post_runtime_resume(struct request_queue *q, int err)
+{
+	spin_lock_irq(q->queue_lock);
+	if (!err) {
+		q->rpm_status = RPM_ACTIVE;
+		__blk_run_queue(q);
+		pm_runtime_mark_last_busy(q->dev);
+		pm_request_autosuspend(q->dev);
+	} else {
+		q->rpm_status = RPM_SUSPENDED;
+	}
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_post_runtime_resume);
+#endif
+
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
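/*
 * Editor's sketch, not part of the patch: how a request-based driver
 * might wire the runtime PM helpers above into its dev_pm_ops callbacks.
 * my_dev, my_hw_suspend/my_hw_resume and the probe-time helper are
 * hypothetical, error handling is minimal, and the 5-second autosuspend
 * delay is an arbitrary example (blk_pm_runtime_init() leaves it at -1
 * until the driver or user sets a real value).
 */
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

struct my_dev {
	struct device *dev;
	struct request_queue *q;
};

static int my_hw_suspend(struct my_dev *md) { return 0; }	/* stub */
static int my_hw_resume(struct my_dev *md) { return 0; }	/* stub */

static int my_runtime_suspend(struct device *dev)
{
	struct my_dev *md = dev_get_drvdata(dev);
	int err;

	err = blk_pre_runtime_suspend(md->q);
	if (err)			/* -EBUSY: requests still pending */
		return err;

	err = my_hw_suspend(md);
	blk_post_runtime_suspend(md->q, err);	/* records the outcome */
	return err;
}

static int my_runtime_resume(struct device *dev)
{
	struct my_dev *md = dev_get_drvdata(dev);
	int err;

	blk_pre_runtime_resume(md->q);
	err = my_hw_resume(md);
	blk_post_runtime_resume(md->q, err);	/* reruns the queue on success */
	return err;
}

static void my_enable_block_rpm(struct my_dev *md)
{
	blk_pm_runtime_init(md->q, md->dev);
	pm_runtime_set_autosuspend_delay(md->dev, 5000);
	pm_runtime_allow(md->dev);
}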
