42 files changed, 587 insertions, 621 deletions
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 48e50f8c1c7..e3011338ab4 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -59,7 +59,7 @@ struct nfhd_device {
         struct gendisk *disk;
 };
 
-static int nfhd_make_request(struct request_queue *queue, struct bio *bio)
+static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
 {
         struct nfhd_device *dev = queue->queuedata;
         struct bio_vec *bvec;
@@ -76,7 +76,6 @@ static int nfhd_make_request(struct request_queue *queue, struct bio *bio)
                 sec += len;
         }
         bio_endio(bio, 0);
-        return 0;
 }
 
 static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 265f0f09395..ba427191906 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -104,7 +104,7 @@ axon_ram_irq_handler(int irq, void *dev)
  * axon_ram_make_request - make_request() method for block device
  * @queue, @bio: see blk_queue_make_request()
  */
-static int
+static void
 axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 {
         struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
@@ -113,7 +113,6 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
         struct bio_vec *vec;
         unsigned int transfered;
         unsigned short idx;
-        int rc = 0;
 
         phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
         phys_end = bank->io_addr + bank->size;
@@ -121,8 +120,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
         bio_for_each_segment(vec, bio, idx) {
                 if (unlikely(phys_mem + vec->bv_len > phys_end)) {
                         bio_io_error(bio);
-                        rc = -ERANGE;
-                        break;
+                        return;
                 }
 
                 user_mem = page_address(vec->bv_page) + vec->bv_offset;
@@ -135,8 +133,6 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
                 transfered += vec->bv_len;
         }
         bio_endio(bio, 0);
-
-        return rc;
 }
 
 /**
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b596e54ddd7..8f630cec906 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -768,25 +768,14 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg,
         return disk_total;
 }
 
-static int blkio_check_dev_num(dev_t dev)
-{
-        int part = 0;
-        struct gendisk *disk;
-
-        disk = get_gendisk(dev, &part);
-        if (!disk || part)
-                return -ENODEV;
-
-        return 0;
-}
-
 static int blkio_policy_parse_and_set(char *buf,
         struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
 {
+        struct gendisk *disk = NULL;
         char *s[4], *p, *major_s = NULL, *minor_s = NULL;
-        int ret;
         unsigned long major, minor;
-        int i = 0;
+        int i = 0, ret = -EINVAL;
+        int part;
         dev_t dev;
         u64 temp;
@@ -804,37 +793,36 @@ static int blkio_policy_parse_and_set(char *buf,
         }
 
         if (i != 2)
-                return -EINVAL;
+                goto out;
 
         p = strsep(&s[0], ":");
         if (p != NULL)
                 major_s = p;
         else
-                return -EINVAL;
+                goto out;
 
         minor_s = s[0];
         if (!minor_s)
-                return -EINVAL;
+                goto out;
 
-        ret = strict_strtoul(major_s, 10, &major);
-        if (ret)
-                return -EINVAL;
+        if (strict_strtoul(major_s, 10, &major))
+                goto out;
 
-        ret = strict_strtoul(minor_s, 10, &minor);
-        if (ret)
-                return -EINVAL;
+        if (strict_strtoul(minor_s, 10, &minor))
+                goto out;
 
         dev = MKDEV(major, minor);
 
-        ret = strict_strtoull(s[1], 10, &temp);
-        if (ret)
-                return -EINVAL;
+        if (strict_strtoull(s[1], 10, &temp))
+                goto out;
 
         /* For rule removal, do not check for device presence. */
         if (temp) {
-                ret = blkio_check_dev_num(dev);
-                if (ret)
-                        return ret;
+                disk = get_gendisk(dev, &part);
+                if (!disk || part) {
+                        ret = -ENODEV;
+                        goto out;
+                }
         }
 
         newpn->dev = dev;
@@ -843,7 +831,7 @@ static int blkio_policy_parse_and_set(char *buf,
         case BLKIO_POLICY_PROP:
                 if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
                      temp > BLKIO_WEIGHT_MAX)
-                        return -EINVAL;
+                        goto out;
 
                 newpn->plid = plid;
                 newpn->fileid = fileid;
@@ -860,7 +848,7 @@ static int blkio_policy_parse_and_set(char *buf,
                 case BLKIO_THROTL_read_iops_device:
                 case BLKIO_THROTL_write_iops_device:
                         if (temp > THROTL_IOPS_MAX)
-                                return -EINVAL;
+                                goto out;
 
                         newpn->plid = plid;
                         newpn->fileid = fileid;
@@ -871,68 +859,96 @@ static int blkio_policy_parse_and_set(char *buf,
         default:
                 BUG();
         }
-
-        return 0;
+        ret = 0;
+out:
+        put_disk(disk);
+        return ret;
 }
 
 unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg, dev_t dev)
 {
         struct blkio_policy_node *pn;
+        unsigned long flags;
+        unsigned int weight;
+
+        spin_lock_irqsave(&blkcg->lock, flags);
 
         pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
                                 BLKIO_PROP_weight_device);
         if (pn)
-                return pn->val.weight;
+                weight = pn->val.weight;
         else
-                return blkcg->weight;
+                weight = blkcg->weight;
+
+        spin_unlock_irqrestore(&blkcg->lock, flags);
+
+        return weight;
 }
 EXPORT_SYMBOL_GPL(blkcg_get_weight);
 
 uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
 {
         struct blkio_policy_node *pn;
+        unsigned long flags;
+        uint64_t bps = -1;
 
+        spin_lock_irqsave(&blkcg->lock, flags);
         pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
                                 BLKIO_THROTL_read_bps_device);
         if (pn)
-                return pn->val.bps;
-        else
-                return -1;
+                bps = pn->val.bps;
+        spin_unlock_irqrestore(&blkcg->lock, flags);
+
+        return bps;
 }
 
 uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
 {
         struct blkio_policy_node *pn;
+        unsigned long flags;
+        uint64_t bps = -1;
+
+        spin_lock_irqsave(&blkcg->lock, flags);
         pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
                                 BLKIO_THROTL_write_bps_device);
         if (pn)
-                return pn->val.bps;
-        else
-                return -1;
+                bps = pn->val.bps;
+        spin_unlock_irqrestore(&blkcg->lock, flags);
+
+        return bps;
 }
 
 unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
 {
         struct blkio_policy_node *pn;
+        unsigned long flags;
+        unsigned int iops = -1;
 
+        spin_lock_irqsave(&blkcg->lock, flags);
         pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
                                 BLKIO_THROTL_read_iops_device);
         if (pn)
-                return pn->val.iops;
-        else
-                return -1;
+                iops = pn->val.iops;
+        spin_unlock_irqrestore(&blkcg->lock, flags);
+
+        return iops;
 }
 
 unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
 {
         struct blkio_policy_node *pn;
+        unsigned long flags;
+        unsigned int iops = -1;
+
+        spin_lock_irqsave(&blkcg->lock, flags);
         pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
                                 BLKIO_THROTL_write_iops_device);
         if (pn)
-                return pn->val.iops;
-        else
-                return -1;
+                iops = pn->val.iops;
+        spin_unlock_irqrestore(&blkcg->lock, flags);
+
+        return iops;
 }
 
 /* Checks whether user asked for deleting a policy rule */
@@ -1085,6 +1101,7 @@ static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
 
         if (blkio_delete_rule_command(newpn)) {
                 blkio_policy_delete_node(pn);
+                kfree(pn);
                 spin_unlock_irq(&blkcg->lock);
                 goto update_io_group;
         }
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index a71d2904ffb..6f3ace7e792 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -188,7 +188,7 @@ struct blkio_policy_node {
         union {
                 unsigned int weight;
                 /*
-                 * Rate read/write in terms of byptes per second
+                 * Rate read/write in terms of bytes per second
                 * Whether this rate represents read or write is determined
                 * by file type "fileid".
                 */
diff --git a/block/blk-core.c b/block/blk-core.c
index d34433ae791..f43c8a5840a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -28,6 +28,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/fault-inject.h>
 #include <linux/list_sort.h>
+#include <linux/delay.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -38,8 +39,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 
-static int __make_request(struct request_queue *q, struct bio *bio);
-
 /*
  * For the allocated request tables
  */
@@ -347,30 +346,80 @@ void blk_put_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_put_queue);
 
-/*
- * Note: If a driver supplied the queue lock, it is disconnected
- * by this function. The actual state of the lock doesn't matter
- * here as the request_queue isn't accessible after this point
- * (QUEUE_FLAG_DEAD is set) and no other requests will be queued.
+/**
+ * blk_drain_queue - drain requests from request_queue
+ * @q: queue to drain
+ * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
+ *
+ * Drain requests from @q. If @drain_all is set, all requests are drained.
+ * If not, only ELVPRIV requests are drained. The caller is responsible
+ * for ensuring that no new requests which need to be drained are queued.
+ */
+void blk_drain_queue(struct request_queue *q, bool drain_all)
+{
+        while (true) {
+                int nr_rqs;
+
+                spin_lock_irq(q->queue_lock);
+
+                elv_drain_elevator(q);
+                if (drain_all)
+                        blk_throtl_drain(q);
+
+                __blk_run_queue(q);
+
+                if (drain_all)
+                        nr_rqs = q->rq.count[0] + q->rq.count[1];
+                else
+                        nr_rqs = q->rq.elvpriv;
+
+                spin_unlock_irq(q->queue_lock);
+
+                if (!nr_rqs)
+                        break;
+                msleep(10);
+        }
+}
+
+/**
+ * blk_cleanup_queue - shutdown a request queue
+ * @q: request queue to shutdown
+ *
+ * Mark @q DEAD, drain all pending requests, destroy and put it. All
+ * future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
-        /*
-         * We know we have process context here, so we can be a little
-         * cautious and ensure that pending block actions on this device
-         * are done before moving on. Going into this function, we should
-         * not have processes doing IO to this device.
-         */
-        blk_sync_queue(q);
+        spinlock_t *lock = q->queue_lock;
 
-        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+        /* mark @q DEAD, no new request or merges will be allowed afterwards */
         mutex_lock(&q->sysfs_lock);
         queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-        mutex_unlock(&q->sysfs_lock);
+
+        spin_lock_irq(lock);
+        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+        queue_flag_set(QUEUE_FLAG_DEAD, q);
 
         if (q->queue_lock != &q->__queue_lock)
                 q->queue_lock = &q->__queue_lock;
+        spin_unlock_irq(lock);
+        mutex_unlock(&q->sysfs_lock);
+
+        /*
+         * Drain all requests queued before DEAD marking. The caller might
+         * be trying to tear down @q before its elevator is initialized, in
+         * which case we don't want to call into draining.
+         */
+        if (q->elevator)
+                blk_drain_queue(q, true);
+
+        /* @q won't process any more request, flush async actions */
+        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+        blk_sync_queue(q);
+
+        /* @q is and will stay empty, shutdown and put */
         blk_put_queue(q);
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
@@ -541,7 +590,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
         /*
         * This also sets hw/phys segments, boundary and size
         */
-        blk_queue_make_request(q, __make_request);
+        blk_queue_make_request(q, blk_queue_bio);
 
         q->sg_reserved_size = INT_MAX;
@@ -576,7 +625,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 }
 
 static struct request *
-blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
+blk_alloc_request(struct request_queue *q, unsigned int flags, gfp_t gfp_mask)
 {
         struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
@@ -587,12 +636,10 @@ blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
 
         rq->cmd_flags = flags | REQ_ALLOCED;
 
-        if (priv) {
-                if (unlikely(elv_set_request(q, rq, gfp_mask))) {
-                        mempool_free(rq, q->rq.rq_pool);
-                        return NULL;
-                }
-                rq->cmd_flags |= REQ_ELVPRIV;
+        if ((flags & REQ_ELVPRIV) &&
+            unlikely(elv_set_request(q, rq, gfp_mask))) {
+                mempool_free(rq, q->rq.rq_pool);
+                return NULL;
         }
 
         return rq;
@@ -651,12 +698,13 @@ static void __freed_request(struct request_queue *q, int sync)
 * A request has just been released. Account for it, update the full and
 * congestion status, wake up any waiters. Called under q->queue_lock.
 */
-static void freed_request(struct request_queue *q, int sync, int priv)
+static void freed_request(struct request_queue *q, unsigned int flags)
 {
         struct request_list *rl = &q->rq;
+        int sync = rw_is_sync(flags);
 
         rl->count[sync]--;
-        if (priv)
+        if (flags & REQ_ELVPRIV)
                 rl->elvpriv--;
 
         __freed_request(q, sync);
@@ -684,10 +732,19 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
         return true;
 }
 
-/*
- * Get a free request, queue_lock must be held.
- * Returns NULL on failure, with queue_lock held.
- * Returns !NULL on success, with queue_lock *not held*.
+/**
+ * get_request - get a free request
+ * @q: request_queue to allocate request from
+ * @rw_flags: RW and SYNC flags
+ * @bio: bio to allocate request for (can be %NULL)
+ * @gfp_mask: allocation mask
+ *
+ * Get a free request from @q. This function may fail under memory
+ * pressure or if @q is dead.
+ *
+ * Must be callled with @q->queue_lock held and,
+ * Returns %NULL on failure, with @q->queue_lock held.
+ * Returns !%NULL on success, with @q->queue_lock *not held*.
 */
 static struct request *get_request(struct request_queue *q, int rw_flags,
                                    struct bio *bio, gfp_t gfp_mask)
@@ -696,7 +753,10 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
         struct request_list *rl = &q->rq;
         struct io_context *ioc = NULL;
         const bool is_sync = rw_is_sync(rw_flags) != 0;
-        int may_queue, priv = 0;
+        int may_queue;
+
+        if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+                return NULL;
 
         may_queue = elv_may_queue(q, rw_flags);
         if (may_queue == ELV_MQUEUE_NO)
@@ -740,17 +800,17 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
         rl->count[is_sync]++;
         rl->starved[is_sync] = 0;
 
-        if (blk_rq_should_init_elevator(bio)) {
-                priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-                if (priv)
-                        rl->elvpriv++;
+        if (blk_rq_should_init_elevator(bio) &&
+            !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+                rw_flags |= REQ_ELVPRIV;
+                rl->elvpriv++;
         }
 
         if (blk_queue_io_stat(q))
                 rw_flags |= REQ_IO_STAT;
         spin_unlock_irq(q->queue_lock);
 
-        rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
+        rq = blk_alloc_request(q, rw_flags, gfp_mask);
         if (unlikely(!rq)) {
                 /*
                 * Allocation failed presumably due to memory. Undo anything
@@ -760,7 +820,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                 * wait queue, but this is pretty rare.
                 */
                 spin_lock_irq(q->queue_lock);
-                freed_request(q, is_sync, priv);
+                freed_request(q, rw_flags);
 
                 /*
                 * in the very unlikely event that allocation failed and no
@@ -790,11 +850,18 @@ out:
         return rq;
 }
 
-/*
- * No available requests for this queue, wait for some requests to become
- * available.
+/**
+ * get_request_wait - get a free request with retry
+ * @q: request_queue to allocate request from
+ * @rw_flags: RW and SYNC flags
+ * @bio: bio to allocate request for (can be %NULL)
+ *
+ * Get a free request from @q. This function keeps retrying under memory
+ * pressure and fails iff @q is dead.
 *
- * Called with q->queue_lock held, and returns with it unlocked.
+ * Must be callled with @q->queue_lock held and,
+ * Returns %NULL on failure, with @q->queue_lock held.
+ * Returns !%NULL on success, with @q->queue_lock *not held*.
 */
 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                                         struct bio *bio)
@@ -808,6 +875,9 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                 struct io_context *ioc;
                 struct request_list *rl = &q->rq;
 
+                if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+                        return NULL;
+
                 prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
                                 TASK_UNINTERRUPTIBLE);
@@ -838,19 +908,15 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
         struct request *rq;
 
-        if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-                return NULL;
-
         BUG_ON(rw != READ && rw != WRITE);
 
         spin_lock_irq(q->queue_lock);
-        if (gfp_mask & __GFP_WAIT) {
+        if (gfp_mask & __GFP_WAIT)
                 rq = get_request_wait(q, rw, NULL);
-        } else {
+        else
                 rq = get_request(q, rw, NULL, gfp_mask);
-                if (!rq)
-                        spin_unlock_irq(q->queue_lock);
-        }
+        if (!rq)
+                spin_unlock_irq(q->queue_lock);
         /* q->queue_lock is unlocked at this point */
 
         return rq;
@@ -1052,14 +1118,13 @@ void __blk_put_request(struct request_queue *q, struct request *req)
         * it didn't come out of our reserved rq pools
         */
         if (req->cmd_flags & REQ_ALLOCED) {
-                int is_sync = rq_is_sync(req) != 0;
-                int priv = req->cmd_flags & REQ_ELVPRIV;
+                unsigned int flags = req->cmd_flags;
 
                 BUG_ON(!list_empty(&req->queuelist));
                 BUG_ON(!hlist_unhashed(&req->hash));
 
                 blk_free_request(q, req);
-                freed_request(q, is_sync, priv);
+                freed_request(q, flags);
         }
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
@@ -1161,18 +1226,32 @@ static bool bio_attempt_front_merge(struct request_queue *q,
         return true;
 }
 
-/*
- * Attempts to merge with the plugged list in the current process. Returns
- * true if merge was successful, otherwise false.
+/**
+ * attempt_plug_merge - try to merge with %current's plugged list
+ * @q: request_queue new bio is being queued at
+ * @bio: new bio being queued
+ * @request_count: out parameter for number of traversed plugged requests
+ *
+ * Determine whether @bio being queued on @q can be merged with a request
+ * on %current's plugged list. Returns %true if merge was successful,
+ * otherwise %false.
+ *
+ * This function is called without @q->queue_lock; however, elevator is
+ * accessed iff there already are requests on the plugged list which in
+ * turn guarantees validity of the elevator.
+ *
+ * Note that, on successful merge, elevator operation
+ * elevator_bio_merged_fn() will be called without queue lock. Elevator
+ * must be ready for this.
 */
-static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
-                               struct bio *bio, unsigned int *request_count)
+static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
+                               unsigned int *request_count)
 {
         struct blk_plug *plug;
         struct request *rq;
         bool ret = false;
 
-        plug = tsk->plug;
+        plug = current->plug;
         if (!plug)
                 goto out;
         *request_count = 0;
@@ -1202,7 +1281,6 @@ out:
 
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
-        req->cpu = bio->bi_comp_cpu;
         req->cmd_type = REQ_TYPE_FS;
 
         req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
@@ -1215,7 +1293,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
         blk_rq_bio_prep(req->q, req, bio);
 }
 
-static int __make_request(struct request_queue *q, struct bio *bio)
+void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
         const bool sync = !!(bio->bi_rw & REQ_SYNC);
         struct blk_plug *plug;
@@ -1240,8 +1318,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
         * Check if we can merge with the plugged list before grabbing
         * any locks.
         */
-        if (attempt_plug_merge(current, q, bio, &request_count))
-                goto out;
+        if (attempt_plug_merge(q, bio, &request_count))
+                return;
 
         spin_lock_irq(q->queue_lock);
@@ -1275,6 +1353,10 @@ get_rq:
         * Returns with the queue unlocked.
         */
         req = get_request_wait(q, rw_flags, bio);
+        if (unlikely(!req)) {
+                bio_endio(bio, -ENODEV);        /* @q is dead */
+                goto out_unlock;
+        }
 
         /*
         * After dropping the lock and possibly sleeping here, our request
@@ -1284,8 +1366,7 @@ get_rq:
         */
         init_request_from_bio(req, bio);
 
-        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
-            bio_flagged(bio, BIO_CPU_AFFINE))
+        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
                 req->cpu = raw_smp_processor_id();
 
         plug = current->plug;
@@ -1316,9 +1397,8 @@ get_rq:
 out_unlock:
                 spin_unlock_irq(q->queue_lock);
         }
-out:
-        return 0;
 }
+EXPORT_SYMBOL_GPL(blk_queue_bio);       /* for device mapper only */
 
 /*
 * If bio->bi_dev is a partition, remap the location
@@ -1417,165 +1497,135 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
         return 0;
 }
 
-/**
- * generic_make_request - hand a buffer to its device driver for I/O
- * @bio: The bio describing the location in memory and on the device.
- *
- * generic_make_request() is used to make I/O requests of block
- * devices. It is passed a &struct bio, which describes the I/O that needs
- * to be done.
- *
- * generic_make_request() does not return any status. The
- * success/failure status of the request, along with notification of
- * completion, is delivered asynchronously through the bio->bi_end_io
- * function described (one day) else where.
- *
- * The caller of generic_make_request must make sure that bi_io_vec
- * are set to describe the memory buffer, and that bi_dev and bi_sector are
- * set to describe the device address, and the
- * bi_end_io and optionally bi_private are set to describe how
- * completion notification should be signaled.
- *
- * generic_make_request and the drivers it calls may use bi_next if this
- * bio happens to be merged with someone else, and may change bi_dev and
- * bi_sector for remaps as it sees fit. So the values of these fields
- * should NOT be depended on after the call to generic_make_request.
- */
-static inline void __generic_make_request(struct bio *bio)
+static noinline_for_stack bool
+generic_make_request_checks(struct bio *bio)
 {
         struct request_queue *q;
-        sector_t old_sector;
-        int ret, nr_sectors = bio_sectors(bio);
-        dev_t old_dev;
+        int nr_sectors = bio_sectors(bio);
         int err = -EIO;
+        char b[BDEVNAME_SIZE];
+        struct hd_struct *part;
 
         might_sleep();
 
         if (bio_check_eod(bio, nr_sectors))
                 goto end_io;
 
-        /*
-         * Resolve the mapping until finished. (drivers are
-         * still free to implement/resolve their own stacking
-         * by explicitly returning 0)
-         *
-         * NOTE: we don't repeat the blk_size check for each new device.
-         * Stacking drivers are expected to know what they are doing.
-         */
-        old_sector = -1;
-        old_dev = 0;
-        do {
-                char b[BDEVNAME_SIZE];
-                struct hd_struct *part;
-
-                q = bdev_get_queue(bio->bi_bdev);
-                if (unlikely(!q)) {
-                        printk(KERN_ERR
-                               "generic_make_request: Trying to access "
-                                "nonexistent block-device %s (%Lu)\n",
-                                bdevname(bio->bi_bdev, b),
-                                (long long) bio->bi_sector);
-                        goto end_io;
-                }
-
-                if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
-                             nr_sectors > queue_max_hw_sectors(q))) {
-                        printk(KERN_ERR "bio too big device %s (%u > %u)\n",
-                               bdevname(bio->bi_bdev, b),
-                               bio_sectors(bio),
-                               queue_max_hw_sectors(q));
-                        goto end_io;
-                }
-
-                if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-                        goto end_io;
-
-                part = bio->bi_bdev->bd_part;
-                if (should_fail_request(part, bio->bi_size) ||
-                    should_fail_request(&part_to_disk(part)->part0,
-                                        bio->bi_size))
-                        goto end_io;
-
-                /*
-                 * If this device has partitions, remap block n
-                 * of partition p to block n+start(p) of the disk.
-                 */
-                blk_partition_remap(bio);
+        q = bdev_get_queue(bio->bi_bdev);
+        if (unlikely(!q)) {
+                printk(KERN_ERR
+                       "generic_make_request: Trying to access "
+                       "nonexistent block-device %s (%Lu)\n",
+                       bdevname(bio->bi_bdev, b),
+                       (long long) bio->bi_sector);
+                goto end_io;
+        }
 
-                if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
-                        goto end_io;
+        if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
+                     nr_sectors > queue_max_hw_sectors(q))) {
+                printk(KERN_ERR "bio too big device %s (%u > %u)\n",
+                       bdevname(bio->bi_bdev, b),
+                       bio_sectors(bio),
+                       queue_max_hw_sectors(q));
+                goto end_io;
+        }
 
-                if (old_sector != -1)
-                        trace_block_bio_remap(q, bio, old_dev, old_sector);
+        part = bio->bi_bdev->bd_part;
+        if (should_fail_request(part, bio->bi_size) ||
+            should_fail_request(&part_to_disk(part)->part0,
+                                bio->bi_size))
+                goto end_io;
 
-                old_sector = bio->bi_sector;
-                old_dev = bio->bi_bdev->bd_