Diffstat (limited to 'drivers/md/dm-raid1.c')
-rw-r--r--	drivers/md/dm-raid1.c | 160
1 file changed, 75 insertions, 85 deletions
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 19a59b041c2..7dfdb5c746d 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -22,8 +22,6 @@
 #define DM_MSG_PREFIX "raid1"
 
 #define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
-#define DM_IO_PAGES 64
-#define DM_KCOPYD_PAGES 64
 
 #define DM_RAID1_HANDLE_ERRORS 0x01
 #define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -63,7 +61,6 @@ struct mirror_set {
 	struct dm_region_hash *rh;
 	struct dm_kcopyd_client *kcopyd_client;
 	struct dm_io_client *io_client;
-	mempool_t *read_record_pool;
 
 	/* recovery */
 	region_t nr_regions;
@@ -85,6 +82,9 @@ struct mirror_set {
 	struct mirror mirror[0];
 };
 
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
+		"A percentage of time allocated for raid resynchronization");
+
 static void wakeup_mirrord(void *context)
 {
 	struct mirror_set *ms = context;
@@ -141,14 +141,13 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
 		queue_bio(ms, bio, WRITE);
 }
 
-#define MIN_READ_RECORDS 20
-struct dm_raid1_read_record {
+struct dm_raid1_bio_record {
 	struct mirror *m;
+	/* if details->bi_bdev == NULL, details were not saved */
 	struct dm_bio_details details;
+	region_t write_region;
 };
 
-static struct kmem_cache *_dm_raid1_read_record_cache;
-
 /*
  * Every mirror should look like this one.
  */
@@ -261,7 +260,7 @@ static int mirror_flush(struct dm_target *ti)
 	struct dm_io_request io_req = {
 		.bi_rw = WRITE_FLUSH,
 		.mem.type = DM_IO_KMEM,
-		.mem.ptr.bvec = NULL,
+		.mem.ptr.addr = NULL,
 		.client = ms->io_client,
 	};
 
@@ -433,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
 	region_t region = dm_rh_bio_to_region(ms->rh, bio);
 
 	if (log->type->in_sync(log, region, 0))
-		return choose_mirror(ms,  bio->bi_sector) ? 1 : 0;
+		return choose_mirror(ms,  bio->bi_iter.bi_sector) ? 1 : 0;
 
 	return 0;
 }
@@ -443,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
  */
 static sector_t map_sector(struct mirror *m, struct bio *bio)
 {
-	if (unlikely(!bio->bi_size))
+	if (unlikely(!bio->bi_iter.bi_size))
 		return 0;
-	return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
+	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
 }
 
 static void map_bio(struct mirror *m, struct bio *bio)
 {
 	bio->bi_bdev = m->dev->bdev;
-	bio->bi_sector = map_sector(m, bio);
+	bio->bi_iter.bi_sector = map_sector(m, bio);
 }
 
 static void map_region(struct dm_io_region *io, struct mirror *m,
@@ -459,7 +458,7 @@ static void map_region(struct dm_io_region *io, struct mirror *m,
 {
 	io->bdev = m->dev->bdev;
 	io->sector = map_sector(m, bio);
-	io->count = bio->bi_size >> 9;
+	io->count = bio_sectors(bio);
 }
 
 static void hold_bio(struct mirror_set *ms, struct bio *bio)
@@ -527,8 +526,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
 	struct dm_io_region io;
 	struct dm_io_request io_req = {
 		.bi_rw = READ,
-		.mem.type = DM_IO_BVEC,
-		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+		.mem.type = DM_IO_BIO,
+		.mem.ptr.bio = bio,
 		.notify.fn = read_callback,
 		.notify.context = bio,
 		.client = m->ms->io_client,
@@ -560,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
 		 * We can only read balance if the region is in sync.
 		 */
 		if (likely(region_in_sync(ms, region, 1)))
-			m = choose_mirror(ms, bio->bi_sector);
+			m = choose_mirror(ms, bio->bi_iter.bi_sector);
 		else if (m && atomic_read(&m->error_count))
 			m = NULL;
 
@@ -630,13 +629,19 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
-		.mem.type = DM_IO_BVEC,
-		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+		.mem.type = DM_IO_BIO,
+		.mem.ptr.bio = bio,
 		.notify.fn = write_callback,
 		.notify.context = bio,
 		.client = ms->io_client,
 	};
 
+	if (bio->bi_rw & REQ_DISCARD) {
+		io_req.bi_rw |= REQ_DISCARD;
+		io_req.mem.type = DM_IO_KMEM;
+		io_req.mem.ptr.addr = NULL;
+	}
+
 	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
 		map_region(dest++, m, bio);
 
@@ -670,7 +675,8 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
 	bio_list_init(&requeue);
 
 	while ((bio = bio_list_pop(writes))) {
-		if (bio->bi_rw & REQ_FLUSH) {
+		if ((bio->bi_rw & REQ_FLUSH) ||
+		    (bio->bi_rw & REQ_DISCARD)) {
 			bio_list_add(&sync, bio);
 			continue;
 		}
@@ -835,8 +841,6 @@ static void do_mirror(struct work_struct *work)
 	do_reads(ms, &reads);
 	do_writes(ms, &writes);
 	do_failures(ms, &failures);
-
-	dm_table_unplug_all(ms->ti->table);
 }
 
 /*-----------------------------------------------------------------
@@ -873,19 +877,9 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
 	atomic_set(&ms->suspend, 0);
 	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);
 
-	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
-						_dm_raid1_read_record_cache);
-
-	if (!ms->read_record_pool) {
-		ti->error = "Error creating mirror read_record_pool";
-		kfree(ms);
-		return NULL;
-	}
-
-	ms->io_client = dm_io_client_create(DM_IO_PAGES);
+	ms->io_client = dm_io_client_create();
 	if (IS_ERR(ms->io_client)) {
 		ti->error = "Error creating dm_io client";
-		mempool_destroy(ms->read_record_pool);
 		kfree(ms);
 		return NULL;
 	}
@@ -897,7 +891,6 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
 	if (IS_ERR(ms->rh)) {
 		ti->error = "Error creating dirty region hash";
 		dm_io_client_destroy(ms->io_client);
-		mempool_destroy(ms->read_record_pool);
 		kfree(ms);
 		return NULL;
 	}
@@ -913,7 +906,6 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti,
 
 	dm_io_client_destroy(ms->io_client);
 	dm_region_hash_destroy(ms->rh);
-	mempool_destroy(ms->read_record_pool);
 	kfree(ms);
 }
 
@@ -921,8 +913,9 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
 		      unsigned int mirror, char **argv)
 {
 	unsigned long long offset;
+	char dummy;
 
-	if (sscanf(argv[1], "%llu", &offset) != 1) {
+	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
 		ti->error = "Invalid offset";
 		return -EINVAL;
 	}
@@ -950,13 +943,14 @@ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
 {
 	unsigned param_count;
 	struct dm_dirty_log *dl;
+	char dummy;
 
 	if (argc < 2) {
 		ti->error = "Insufficient mirror log arguments";
 		return NULL;
 	}
 
-	if (sscanf(argv[1], "%u", &param_count) != 1) {
+	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
 		ti->error = "Invalid mirror log argument count";
 		return NULL;
 	}
@@ -983,13 +977,14 @@ static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
 {
 	unsigned num_features;
 	struct dm_target *ti = ms->ti;
+	char dummy;
 
 	*args_used = 0;
 
 	if (!argc)
 		return 0;
 
-	if (sscanf(argv[0], "%u", &num_features) != 1) {
+	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
 		ti->error = "Invalid number of features";
 		return -EINVAL;
 	}
@@ -1033,6 +1028,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	unsigned int nr_mirrors, m, args_used;
 	struct mirror_set *ms;
 	struct dm_dirty_log *dl;
+	char dummy;
 
 	dl = create_dirty_log(ti, argc, argv, &args_used);
 	if (!dl)
@@ -1041,7 +1037,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	argv += args_used;
 	argc -= args_used;
 
-	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
+	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
 	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
 		ti->error = "Invalid number of mirrors";
 		dm_dirty_log_destroy(dl);
@@ -1074,10 +1070,17 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 
 	ti->private = ms;
-	ti->split_io = dm_rh_get_region_size(ms->rh);
-	ti->num_flush_requests = 1;
 
-	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
+	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
+	if (r)
+		goto err_free_context;
+
+	ti->num_flush_bios = 1;
+	ti->num_discard_bios = 1;
+	ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
+	ti->discard_zeroes_data_unsupported = true;
+
+	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
 	if (!ms->kmirrord_wq) {
 		DMERR("couldn't start kmirrord");
 		r = -ENOMEM;
@@ -1110,9 +1113,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto err_destroy_wq;
 	}
 
-	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
-	if (r)
+	ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
+	if (IS_ERR(ms->kcopyd_client)) {
+		r = PTR_ERR(ms->kcopyd_client);
 		goto err_destroy_wq;
+	}
 
 	wakeup_mirrord(ms);
 	return 0;
@@ -1130,7 +1135,7 @@ static void mirror_dtr(struct dm_target *ti)
 
 	del_timer_sync(&ms->timer);
 	flush_workqueue(ms->kmirrord_wq);
-	flush_scheduled_work();
+	flush_work(&ms->trigger_event);
 	dm_kcopyd_client_destroy(ms->kcopyd_client);
 	destroy_workqueue(ms->kmirrord_wq);
 	free_context(ms, ti, ms->nr_mirrors);
@@ -1139,18 +1144,20 @@ static void mirror_dtr(struct dm_target *ti)
 /*
  * Mirror mapping function
  */
-static int mirror_map(struct dm_target *ti, struct bio *bio,
-		      union map_info *map_context)
+static int mirror_map(struct dm_target *ti, struct bio *bio)
 {
 	int r, rw = bio_rw(bio);
 	struct mirror *m;
 	struct mirror_set *ms = ti->private;
-	struct dm_raid1_read_record *read_record = NULL;
 	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
+	struct dm_raid1_bio_record *bio_record =
+	  dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
+
+	bio_record->details.bi_bdev = NULL;
 
 	if (rw == WRITE) {
 		/* Save region for mirror_end_io() handler */
-		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
+		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
 		queue_bio(ms, bio, WRITE);
 		return DM_MAPIO_SUBMITTED;
 	}
@@ -1174,37 +1181,33 @@ static int mirror_map(struct dm_target *ti, struct bio *bio,
 	 * The region is in-sync and we can perform reads directly.
 	 * Store enough information so we can retry if it fails.
 	 */
-	m = choose_mirror(ms, bio->bi_sector);
+	m = choose_mirror(ms, bio->bi_iter.bi_sector);
 	if (unlikely(!m))
 		return -EIO;
 
-	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
-	if (likely(read_record)) {
-		dm_bio_record(&read_record->details, bio);
-		map_context->ptr = read_record;
-		read_record->m = m;
-	}
+	dm_bio_record(&bio_record->details, bio);
+	bio_record->m = m;
 
 	map_bio(m, bio);
 
 	return DM_MAPIO_REMAPPED;
 }
 
-static int mirror_end_io(struct dm_target *ti, struct bio *bio,
-			 int error, union map_info *map_context)
+static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 {
 	int rw = bio_rw(bio);
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
 	struct mirror *m = NULL;
 	struct dm_bio_details *bd = NULL;
-	struct dm_raid1_read_record *read_record = map_context->ptr;
+	struct dm_raid1_bio_record *bio_record =
+	  dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
 
 	/*
 	 * We need to dec pending if this was a write.
 	 */
 	if (rw == WRITE) {
-		if (!(bio->bi_rw & REQ_FLUSH))
-			dm_rh_dec(ms->rh, map_context->ll);
+		if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
+			dm_rh_dec(ms->rh, bio_record->write_region);
 		return error;
 	}
 
@@ -1215,7 +1218,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 		goto out;
 
 	if (unlikely(error)) {
-		if (!read_record) {
+		if (!bio_record->details.bi_bdev) {
 			/*
 			 * There wasn't enough memory to record necessary
 			 * information for a retry or there was no other
@@ -1225,7 +1228,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 			return -EIO;
 		}
 
-		m = read_record->m;
+		m = bio_record->m;
 
 		DMERR("Mirror read failed from %s. Trying alternative device.",
 		      m->dev->name);
@@ -1237,22 +1240,21 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 		 * mirror.
 		 */
 		if (default_ok(m) || mirror_available(ms, bio)) {
-			bd = &read_record->details;
+			bd = &bio_record->details;
 
 			dm_bio_restore(bd, bio);
-			mempool_free(read_record, ms->read_record_pool);
-			map_context->ptr = NULL;
+			bio_record->details.bi_bdev = NULL;
+
+			atomic_inc(&bio->bi_remaining);
+
 			queue_bio(ms, bio, rw);
-			return 1;
+			return DM_ENDIO_INCOMPLETE;
 		}
 		DMERR("All replicated volumes dead, failing I/O");
 	}
 
 out:
-	if (read_record) {
-		mempool_free(read_record, ms->read_record_pool);
-		map_context->ptr = NULL;
-	}
+	bio_record->details.bi_bdev = NULL;
 
 	return error;
 }
@@ -1350,8 +1352,8 @@ static char device_status_char(struct mirror *m)
 }
 
 
-static int mirror_status(struct dm_target *ti, status_type_t type,
-			 char *result, unsigned int maxlen)
+static void mirror_status(struct dm_target *ti, status_type_t type,
+			  unsigned status_flags, char *result, unsigned maxlen)
 {
 	unsigned int m, sz = 0;
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
@@ -1386,8 +1388,6 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
 		if (ms->features & DM_RAID1_HANDLE_ERRORS)
 			DMEMIT(" 1 handle_errors");
 	}
-
-	return 0;
 }
 
 static int mirror_iterate_devices(struct dm_target *ti,
@@ -1406,7 +1406,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
 
 static struct target_type mirror_target = {
 	.name	 = "mirror",
-	.version = {1, 12, 0},
+	.version = {1, 13, 2},
 	.module	 = THIS_MODULE,
 	.ctr	 = mirror_ctr,
 	.dtr	 = mirror_dtr,
@@ -1423,13 +1423,6 @@ static int __init dm_mirror_init(void)
 {
 	int r;
 
-	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
-	if (!_dm_raid1_read_record_cache) {
-		DMERR("Can't allocate dm_raid1_read_record cache");
-		r = -ENOMEM;
-		goto bad_cache;
-	}
-
	r = dm_register_target(&mirror_target);
 	if (r < 0) {
 		DMERR("Failed to register mirror target");
@@ -1439,15 +1432,12 @@ static int __init dm_mirror_init(void)
 	return 0;
 
 bad_target:
-	kmem_cache_destroy(_dm_raid1_read_record_cache);
-bad_cache:
 	return r;
 }
 
 static void __exit dm_mirror_exit(void)
 {
 	dm_unregister_target(&mirror_target);
-	kmem_cache_destroy(_dm_raid1_read_record_cache);
 }
 
 /* Module hooks */
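The central change in the diff above is the move from a private read-record kmem_cache/mempool to device-mapper's per-bio data: the constructor declares ti->per_bio_data_size and the map/end_io paths fetch the preallocated record with dm_per_bio_data(), so the per-read allocation and its failure path disappear. A minimal sketch of that pattern, assuming the surrounding DM headers; the example_* names are illustrative only and are not part of this patch:

#include <linux/device-mapper.h>
#include <linux/dm-region-hash.h>
#include "dm-bio-record.h"

/* Per-bio state; DM core preallocates this in front of every bio it clones. */
struct example_bio_record {
	struct dm_bio_details details;	/* details.bi_bdev == NULL => nothing saved */
	region_t write_region;		/* region to dm_rh_dec() when a write completes */
};

/* In the target constructor: reserve the per-bio area instead of a mempool. */
static void example_ctr_fragment(struct dm_target *ti)
{
	ti->per_bio_data_size = sizeof(struct example_bio_record);
}

/* In the map path: the record already exists, so no allocation can fail. */
static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_bio_record *rec =
		dm_per_bio_data(bio, sizeof(struct example_bio_record));

	rec->details.bi_bdev = NULL;		/* nothing recorded yet */

	if (bio_data_dir(bio) == WRITE) {
		rec->write_region = 0;		/* the real target stores dm_rh_bio_to_region() */
		return DM_MAPIO_SUBMITTED;	/* writes are queued to the mirror daemon */
	}

	dm_bio_record(&rec->details, bio);	/* save enough state to retry the read */
	return DM_MAPIO_REMAPPED;
}

The end_io path can then retrieve the same record with another dm_per_bio_data() call and, on a failed read, dm_bio_restore() the saved details before requeueing the bio, which is what the mirror_end_io() hunks above do.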
