Diffstat (limited to 'drivers/md/dm-io.c')
 drivers/md/dm-io.c | 180 ++++++++++++++++++++++++----------------------
 1 file changed, 91 insertions(+), 89 deletions(-)
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 10f457ca6af..db404a0f7e2 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -10,6 +10,7 @@
 #include <linux/device-mapper.h>
 
 #include <linux/bio.h>
+#include <linux/completion.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -31,44 +32,34 @@ struct dm_io_client {
  */
 struct io {
 	unsigned long error_bits;
-	unsigned long eopnotsupp_bits;
 	atomic_t count;
-	struct task_struct *sleeper;
+	struct completion *wait;
 	struct dm_io_client *client;
 	io_notify_fn callback;
 	void *context;
+	void *vma_invalidate_address;
+	unsigned long vma_invalidate_size;
 } __attribute__((aligned(DM_IO_MAX_REGIONS)));
 
 static struct kmem_cache *_dm_io_cache;
 
 /*
- * io contexts are only dynamically allocated for asynchronous
- * io.  Since async io is likely to be the majority of io we'll
- * have the same number of io contexts as bios! (FIXME: must reduce this).
- */
-
-static unsigned int pages_to_ios(unsigned int pages)
-{
-	return 4 * pages;	/* too many ? */
-}
-
-/*
  * Create a client with mempool and bioset.
  */
-struct dm_io_client *dm_io_client_create(unsigned num_pages)
+struct dm_io_client *dm_io_client_create(void)
 {
-	unsigned ios = pages_to_ios(num_pages);
 	struct dm_io_client *client;
+	unsigned min_ios = dm_get_reserved_bio_based_ios();
 
 	client = kmalloc(sizeof(*client), GFP_KERNEL);
 	if (!client)
 		return ERR_PTR(-ENOMEM);
 
-	client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
+	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
 	if (!client->pool)
 		goto bad;
 
-	client->bios = bioset_create(16, 0);
+	client->bios = bioset_create(min_ios, 0);
 	if (!client->bios)
 		goto bad;
 
@@ -82,13 +73,6 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
 }
 EXPORT_SYMBOL(dm_io_client_create);
 
-int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
-{
-	return mempool_resize(client->pool, pages_to_ios(num_pages),
-			      GFP_KERNEL);
-}
-EXPORT_SYMBOL(dm_io_client_resize);
-
 void dm_io_client_destroy(struct dm_io_client *client)
 {
 	mempool_destroy(client->pool);
@@ -130,15 +114,16 @@ static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
  *---------------------------------------------------------------*/
 static void dec_count(struct io *io, unsigned int region, int error)
 {
-	if (error) {
+	if (error)
 		set_bit(region, &io->error_bits);
-		if (error == -EOPNOTSUPP)
-			set_bit(region, &io->eopnotsupp_bits);
-	}
 
 	if (atomic_dec_and_test(&io->count)) {
-		if (io->sleeper)
-			wake_up_process(io->sleeper);
+		if (io->vma_invalidate_size)
+			invalidate_kernel_vmap_range(io->vma_invalidate_address,
+						     io->vma_invalidate_size);
+
+		if (io->wait)
+			complete(io->wait);
 
 		else {
 			unsigned long r = io->error_bits;
@@ -180,6 +165,9 @@ struct dpages {
 
 	unsigned context_u;
 	void *context_ptr;
+
+	void *vma_invalidate_address;
+	unsigned long vma_invalidate_size;
 };
 
 /*
@@ -214,26 +202,28 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
 /*
  * Functions for getting the pages from a bvec.
  */
-static void bvec_get_page(struct dpages *dp,
-		  struct page **p, unsigned long *len, unsigned *offset)
+static void bio_get_page(struct dpages *dp, struct page **p,
+			 unsigned long *len, unsigned *offset)
 {
-	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
+	struct bio_vec *bvec = dp->context_ptr;
 	*p = bvec->bv_page;
-	*len = bvec->bv_len;
-	*offset = bvec->bv_offset;
+	*len = bvec->bv_len - dp->context_u;
+	*offset = bvec->bv_offset + dp->context_u;
 }
 
-static void bvec_next_page(struct dpages *dp)
+static void bio_next_page(struct dpages *dp)
 {
-	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
+	struct bio_vec *bvec = dp->context_ptr;
 	dp->context_ptr = bvec + 1;
+	dp->context_u = 0;
 }
 
-static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
+static void bio_dp_init(struct dpages *dp, struct bio *bio)
 {
-	dp->get_page = bvec_get_page;
-	dp->next_page = bvec_next_page;
-	dp->context_ptr = bvec;
+	dp->get_page = bio_get_page;
+	dp->next_page = bio_next_page;
+	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+	dp->context_u = bio->bi_iter.bi_bvec_done;
 }
 
 /*
@@ -261,16 +251,6 @@ static void vm_dp_init(struct dpages *dp, void *data)
 	dp->context_ptr = data;
 }
 
-static void dm_bio_destructor(struct bio *bio)
-{
-	unsigned region;
-	struct io *io;
-
-	retrieve_io_and_region_from_bio(bio, &io, &region);
-
-	bio_free(bio, io->client->bios);
-}
-
 /*
  * Functions for getting the pages from kernel memory.
  */
@@ -308,29 +288,50 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
 	unsigned offset;
 	unsigned num_bvecs;
 	sector_t remaining = where->count;
+	struct request_queue *q = bdev_get_queue(where->bdev);
+	unsigned short logical_block_size = queue_logical_block_size(q);
+	sector_t num_sectors;
 
 	/*
-	 * where->count may be zero if rw holds a write barrier and we
-	 * need to send a zero-sized barrier.
+	 * where->count may be zero if rw holds a flush and we need to
+	 * send a zero-sized flush.
 	 */
 	do {
 		/*
 		 * Allocate a suitably sized-bio.
 		 */
-		num_bvecs = dm_sector_div_up(remaining,
-					     (PAGE_SIZE >> SECTOR_SHIFT));
-		num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
+		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
+			num_bvecs = 1;
+		else
+			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
+					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
+
 		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
-		bio->bi_sector = where->sector + (where->count - remaining);
+		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
 		bio->bi_bdev = where->bdev;
 		bio->bi_end_io = endio;
-		bio->bi_destructor = dm_bio_destructor;
 		store_io_and_region_in_bio(bio, io, region);
 
-		/*
-		 * Try and add as many pages as possible.
-		 */
-		while (remaining) {
+		if (rw & REQ_DISCARD) {
+			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
+			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
+			remaining -= num_sectors;
+		} else if (rw & REQ_WRITE_SAME) {
+			/*
+			 * WRITE SAME only uses a single page.
+			 */
+			dp->get_page(dp, &page, &len, &offset);
+			bio_add_page(bio, page, logical_block_size, offset);
+			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
+			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
+
+			offset = 0;
+			remaining -= num_sectors;
+			dp->next_page(dp);
+		} else while (remaining) {
+			/*
+			 * Try and add as many pages as possible.
+			 */
 			dp->get_page(dp, &page, &len, &offset);
 			len = min(len, to_bytes(remaining));
 			if (!bio_add_page(bio, page, len, offset))
@@ -356,7 +357,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	BUG_ON(num_regions > DM_IO_MAX_REGIONS);
 
 	if (sync)
-		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+		rw |= REQ_SYNC;
 
 	/*
 	 * For multiple regions we need to be careful to rewind
@@ -364,7 +365,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	 */
 	for (i = 0; i < num_regions; i++) {
 		*dp = old_pages;
-		if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
+		if (where[i].count || (rw & REQ_FLUSH))
 			do_region(rw, i, where + i, dp, io);
 	}
 
@@ -387,35 +388,24 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 	 */
 	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
 	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
+	DECLARE_COMPLETION_ONSTACK(wait);
 
 	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
 		WARN_ON(1);
 		return -EIO;
 	}
 
-retry:
 	io->error_bits = 0;
-	io->eopnotsupp_bits = 0;
 	atomic_set(&io->count, 1); /* see dispatch_io() */
-	io->sleeper = current;
+	io->wait = &wait;
 	io->client = client;
 
-	dispatch_io(rw, num_regions, where, dp, io, 1);
+	io->vma_invalidate_address = dp->vma_invalidate_address;
+	io->vma_invalidate_size = dp->vma_invalidate_size;
 
-	while (1) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-
-		if (!atomic_read(&io->count))
-			break;
-
-		io_schedule();
-	}
-	set_current_state(TASK_RUNNING);
+	dispatch_io(rw, num_regions, where, dp, io, 1);
 
-	if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
-		rw &= ~(1 << BIO_RW_BARRIER);
-		goto retry;
-	}
+	wait_for_completion_io(&wait);
 
 	if (error_bits)
 		*error_bits = io->error_bits;
@@ -437,30 +427,42 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
 
 	io = mempool_alloc(client->pool, GFP_NOIO);
 	io->error_bits = 0;
-	io->eopnotsupp_bits = 0;
 	atomic_set(&io->count, 1); /* see dispatch_io() */
-	io->sleeper = NULL;
+	io->wait = NULL;
 	io->client = client;
 	io->callback = fn;
 	io->context = context;
 
+	io->vma_invalidate_address = dp->vma_invalidate_address;
+	io->vma_invalidate_size = dp->vma_invalidate_size;
+
 	dispatch_io(rw, num_regions, where, dp, io, 0);
 	return 0;
 }
 
-static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
+static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
+		   unsigned long size)
 {
 	/* Set up dpages based on memory type */
+
+	dp->vma_invalidate_address = NULL;
+	dp->vma_invalidate_size = 0;
+
 	switch (io_req->mem.type) {
 	case DM_IO_PAGE_LIST:
 		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
 		break;
 
-	case DM_IO_BVEC:
-		bvec_dp_init(dp, io_req->mem.ptr.bvec);
+	case DM_IO_BIO:
+		bio_dp_init(dp, io_req->mem.ptr.bio);
 		break;
 
 	case DM_IO_VMA:
+		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
+		if ((io_req->bi_rw & RW_MASK) == READ) {
+			dp->vma_invalidate_address = io_req->mem.ptr.vma;
+			dp->vma_invalidate_size = size;
+		}
 		vm_dp_init(dp, io_req->mem.ptr.vma);
 		break;
 
@@ -479,8 +481,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
  * New collapsed (a)synchronous interface.
  *
  * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
- * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+ * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
  * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
@@ -489,7 +491,7 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
 	int r;
 	struct dpages dp;
 
-	r = dp_init(io_req, &dp);
+	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
 
 	if (r)
 		return r;
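
The heart of the change is the waiting scheme: sync_io() used to park the caller via io->sleeper and an open-coded TASK_UNINTERRUPTIBLE loop, with dec_count() calling wake_up_process(); it now uses an on-stack completion, and the -EOPNOTSUPP barrier-retry path (eopnotsupp_bits) disappears along with the old barrier flags. The fragment below is an illustrative sketch of that pattern only, not part of the patch: the demo_* names are invented, and a workqueue item stands in for the real bio endio path.

    #include <linux/completion.h>
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    /* Hypothetical stand-in for dm-io's 'struct io'. */
    struct demo_io {
            struct completion *wait;        /* NULL when the caller is async */
            struct work_struct work;
    };

    static void demo_endio(struct work_struct *ws)
    {
            struct demo_io *io = container_of(ws, struct demo_io, work);

            /* Plays the role of dec_count(): wake the on-stack waiter, if any. */
            if (io->wait)
                    complete(io->wait);
    }

    static void demo_sync_io(struct demo_io *io)
    {
            DECLARE_COMPLETION_ONSTACK(wait);       /* valid only for this frame */

            io->wait = &wait;
            INIT_WORK(&io->work, demo_endio);
            schedule_work(&io->work);               /* plays the role of dispatch_io() */

            /*
             * Blocks like the old loop did, but also accounts the sleep
             * as I/O wait for the scheduler.
             */
            wait_for_completion_io(&wait);
    }

complete() takes the completion's spinlock with interrupts disabled, so the real dec_count() can safely run from the bio completion path while the waiter's completion lives on sync_io()'s stack.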

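On the submission side, do_region() now treats REQ_DISCARD and REQ_WRITE_SAME specially: no page-packing loop, a single-bvec bio, bi_iter.bi_size set directly, and each bio capped at the queue limit (q->limits.max_discard_sectors or max_write_same_sectors), looping until 'remaining' is consumed. Below is a standalone user-space sketch of just that splitting arithmetic; the limit and sizes are invented example numbers, not values from the patch.

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Mirrors do_region()'s REQ_DISCARD loop: one bio per iteration,
     * each covering at most the queue's max_discard_sectors. */
    static void split_discard(sector_t start, sector_t count,
                              sector_t max_discard_sectors)
    {
            sector_t remaining = count;

            while (remaining) {
                    sector_t num_sectors = remaining < max_discard_sectors
                                         ? remaining : max_discard_sectors;

                    printf("bio: sector %llu, %llu sectors\n",
                           start + (count - remaining), num_sectors);
                    remaining -= num_sectors;
            }
    }

    int main(void)
    {
            /* e.g. a 10000-sector discard against a 4096-sector queue limit */
            split_discard(2048, 10000, 4096);
            return 0;
    }

This prints bios of 4096, 4096 and 1808 sectors, starting at sectors 2048, 6144 and 10240, which is exactly how each iteration of the kernel loop advances bi_sector by (where->count - remaining).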