Diffstat (limited to 'drivers/md')
98 files changed, 11821 insertions, 7746 deletions
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 30b426ed744..5bdedf6df15 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -176,8 +176,12 @@ config MD_FAULTY  source "drivers/md/bcache/Kconfig" +config BLK_DEV_DM_BUILTIN +	boolean +  config BLK_DEV_DM  	tristate "Device mapper support" +	select BLK_DEV_DM_BUILTIN  	---help---  	  Device-mapper is a low level volume manager.  It works by allowing  	  people to specify mappings for ranges of logical sectors.  Various @@ -238,6 +242,7 @@ config DM_CRYPT  config DM_SNAPSHOT         tristate "Snapshot target"         depends on BLK_DEV_DM +       select DM_BUFIO         ---help---           Allow volume managers to take writable snapshots of a device. @@ -249,16 +254,6 @@ config DM_THIN_PROVISIONING         ---help---           Provides thin provisioning and snapshots that share a data store. -config DM_DEBUG_BLOCK_STACK_TRACING -	boolean "Keep stack trace of thin provisioning block lock holders" -	depends on STACKTRACE_SUPPORT && DM_THIN_PROVISIONING -	select STACKTRACE -	---help--- -	  Enable this for messages that may help debug problems with the -	  block manager locking used by thin provisioning. - -	  If unsure, say N. -  config DM_CACHE         tristate "Cache target (EXPERIMENTAL)"         depends on BLK_DEV_DM @@ -290,6 +285,17 @@ config DM_CACHE_CLEANER           A simple cache policy that writes back all data to the           origin.  Used when decommissioning a dm-cache. +config DM_ERA +       tristate "Era target (EXPERIMENTAL)" +       depends on BLK_DEV_DM +       default n +       select DM_PERSISTENT_DATA +       select DM_BIO_PRISON +       ---help--- +         dm-era tracks which parts of a block device are written to +         over time.  Useful for maintaining cache coherency when using +         vendor snapshots. +  config DM_MIRROR         tristate "Mirror target"         depends on BLK_DEV_DM @@ -297,6 +303,17 @@ config DM_MIRROR           Allow volume managers to mirror logical volumes, also           needed for live data migration tools such as 'pvmove'. +config DM_LOG_USERSPACE +	tristate "Mirror userspace logging" +	depends on DM_MIRROR && NET +	select CONNECTOR +	---help--- +	  The userspace logging module provides a mechanism for +	  relaying the dm-dirty-log API to userspace.  Log designs +	  which are more suited to userspace implementation (e.g. +	  shared storage logs) or experimental logs can be implemented +	  by leveraging this framework. +  config DM_RAID         tristate "RAID 1/4/5/6/10 target"         depends on BLK_DEV_DM @@ -323,17 +340,6 @@ config DM_RAID  	 RAID-5, RAID-6 distributes the syndromes across the drives  	 in one of the available parity distribution methods. -config DM_LOG_USERSPACE -	tristate "Mirror userspace logging" -	depends on DM_MIRROR && NET -	select CONNECTOR -	---help--- -	  The userspace logging module provides a mechanism for -	  relaying the dm-dirty-log API to userspace.  Log designs -	  which are more suited to userspace implementation (e.g. -	  shared storage logs) or experimental logs can be implemented -	  by leveraging this framework. 
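The DM_ERA help text above is terse, so here is a rough editorial sketch of the idea it describes (not code from this patch; every name below is invented for illustration): an era target keeps a monotonically increasing era counter and records, per block, the era in which that block was last written, so a backup tool can ask "which blocks changed since era N" and reconcile a vendor snapshot taken at that era.

/* Illustrative only - hypothetical names, not the dm-era implementation. */
#include <stddef.h>
#include <stdint.h>

struct era_tracker {
	uint32_t current_era;   /* bumped on an "era rollover" request */
	uint32_t *written_in;   /* per-block era of the most recent write */
	size_t nr_blocks;
};

static void era_note_write(struct era_tracker *t, size_t block)
{
	if (block < t->nr_blocks)
		t->written_in[block] = t->current_era;
}

/* Blocks written in era `since` or later are no longer covered by a
 * snapshot taken at era `since` and must be re-copied. */
static int era_block_changed_since(const struct era_tracker *t,
				   size_t block, uint32_t since)
{
	return block < t->nr_blocks && t->written_in[block] >= since;
}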
-  config DM_ZERO  	tristate "Zero target"  	depends on BLK_DEV_DM diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 2acc43fe022..a2da532b1c2 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -14,6 +14,7 @@ dm-thin-pool-y	+= dm-thin.o dm-thin-metadata.o  dm-cache-y	+= dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o  dm-cache-mq-y   += dm-cache-policy-mq.o  dm-cache-cleaner-y += dm-cache-policy-cleaner.o +dm-era-y	+= dm-era-target.o  md-mod-y	+= md.o bitmap.o  raid456-y	+= raid5.o @@ -32,6 +33,7 @@ obj-$(CONFIG_MD_FAULTY)		+= faulty.o  obj-$(CONFIG_BCACHE)		+= bcache/  obj-$(CONFIG_BLK_DEV_MD)	+= md-mod.o  obj-$(CONFIG_BLK_DEV_DM)	+= dm-mod.o +obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o  obj-$(CONFIG_DM_BUFIO)		+= dm-bufio.o  obj-$(CONFIG_DM_BIO_PRISON)	+= dm-bio-prison.o  obj-$(CONFIG_DM_CRYPT)		+= dm-crypt.o @@ -52,6 +54,7 @@ obj-$(CONFIG_DM_VERITY)		+= dm-verity.o  obj-$(CONFIG_DM_CACHE)		+= dm-cache.o  obj-$(CONFIG_DM_CACHE_MQ)	+= dm-cache-mq.o  obj-$(CONFIG_DM_CACHE_CLEANER)	+= dm-cache-cleaner.o +obj-$(CONFIG_DM_ERA)		+= dm-era.o  ifeq ($(CONFIG_DM_UEVENT),y)  dm-mod-objs			+= dm-uevent.o diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig index f950c9d29f3..4d200883c50 100644 --- a/drivers/md/bcache/Kconfig +++ b/drivers/md/bcache/Kconfig @@ -13,15 +13,8 @@ config BCACHE_DEBUG  	---help---  	Don't select this option unless you're a developer -	Enables extra debugging tools (primarily a fuzz tester) - -config BCACHE_EDEBUG -	bool "Extended runtime checks" -	depends on BCACHE -	---help--- -	Don't select this option unless you're a developer - -	Enables extra runtime checks which significantly affect performance +	Enables extra debugging tools, allows expensive runtime checks to be +	turned on.  config BCACHE_CLOSURES_DEBUG  	bool "Debug closures" @@ -31,11 +24,3 @@ config BCACHE_CLOSURES_DEBUG  	Keeps all active closures in a linked list and provides a debugfs  	interface to list them, which makes it possible to see asynchronous  	operations that get stuck. 
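The BCACHE_EDEBUG option removed above is folded into BCACHE_DEBUG as runtime-switchable checks; the alloc.c hunks further down use expensive_debug_checks(ca->set) for exactly this. A plausible shape for that helper, assuming the per-cache-set boolean added to struct cache_set in this series (a sketch, the exact definition is not shown in these hunks):

#ifdef CONFIG_BCACHE_DEBUG
/* Gate the costly consistency checks on a runtime flag (settable via
 * sysfs) instead of a separate compile-time config option. */
#define expensive_debug_checks(c)	((c)->expensive_debug_checks)
#else
#define expensive_debug_checks(c)	0
#endif

/* Used below in bch_bucket_alloc():  if (expensive_debug_checks(ca->set)) ... */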
- -# cgroup code needs to be updated: -# -#config CGROUP_BCACHE -#	bool "Cgroup controls for bcache" -#	depends on BCACHE && BLK_CGROUP -#	---help--- -#	TODO diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile index 0e9c82523be..c488b846f83 100644 --- a/drivers/md/bcache/Makefile +++ b/drivers/md/bcache/Makefile @@ -1,7 +1,8 @@  obj-$(CONFIG_BCACHE)	+= bcache.o -bcache-y		:= alloc.o btree.o bset.o io.o journal.o writeback.o\ -	movinggc.o request.o super.o sysfs.o debug.o util.o trace.o stats.o closure.o +bcache-y		:= alloc.o bset.o btree.o closure.o debug.o extents.o\ +	io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\ +	util.o writeback.o  CFLAGS_request.o	+= -Iblock diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index e45f5575fd4..443d03fbac4 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -63,13 +63,12 @@  #include "bcache.h"  #include "btree.h" +#include <linux/blkdev.h>  #include <linux/freezer.h>  #include <linux/kthread.h>  #include <linux/random.h>  #include <trace/events/bcache.h> -#define MAX_IN_FLIGHT_DISCARDS		8U -  /* Bucket heap / gen */  uint8_t bch_inc_gen(struct cache *ca, struct bucket *b) @@ -79,12 +78,6 @@ uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)  	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));  	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX); -	if (CACHE_SYNC(&ca->set->sb)) { -		ca->need_save_prio = max(ca->need_save_prio, -					 bucket_disk_gen(b)); -		WARN_ON_ONCE(ca->need_save_prio > BUCKET_DISK_GEN_MAX); -	} -  	return ret;  } @@ -121,119 +114,63 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)  	mutex_unlock(&c->bucket_lock);  } -/* Discard/TRIM */ - -struct discard { -	struct list_head	list; -	struct work_struct	work; -	struct cache		*ca; -	long			bucket; - -	struct bio		bio; -	struct bio_vec		bv; -}; - -static void discard_finish(struct work_struct *w) -{ -	struct discard *d = container_of(w, struct discard, work); -	struct cache *ca = d->ca; -	char buf[BDEVNAME_SIZE]; - -	if (!test_bit(BIO_UPTODATE, &d->bio.bi_flags)) { -		pr_notice("discard error on %s, disabling", -			 bdevname(ca->bdev, buf)); -		d->ca->discard = 0; -	} - -	mutex_lock(&ca->set->bucket_lock); - -	fifo_push(&ca->free, d->bucket); -	list_add(&d->list, &ca->discards); -	atomic_dec(&ca->discards_in_flight); - -	mutex_unlock(&ca->set->bucket_lock); - -	closure_wake_up(&ca->set->bucket_wait); -	wake_up_process(ca->alloc_thread); - -	closure_put(&ca->set->cl); -} - -static void discard_endio(struct bio *bio, int error) -{ -	struct discard *d = container_of(bio, struct discard, bio); -	schedule_work(&d->work); -} - -static void do_discard(struct cache *ca, long bucket) -{ -	struct discard *d = list_first_entry(&ca->discards, -					     struct discard, list); - -	list_del(&d->list); -	d->bucket = bucket; - -	atomic_inc(&ca->discards_in_flight); -	closure_get(&ca->set->cl); - -	bio_init(&d->bio); - -	d->bio.bi_sector	= bucket_to_sector(ca->set, d->bucket); -	d->bio.bi_bdev		= ca->bdev; -	d->bio.bi_rw		= REQ_WRITE|REQ_DISCARD; -	d->bio.bi_max_vecs	= 1; -	d->bio.bi_io_vec	= d->bio.bi_inline_vecs; -	d->bio.bi_size		= bucket_bytes(ca); -	d->bio.bi_end_io	= discard_endio; -	bio_set_prio(&d->bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); - -	submit_bio(0, &d->bio); -} - -/* Allocation */ +/* + * Background allocation thread: scans for buckets to be invalidated, + * invalidates them, rewrites prios/gens (marking them as invalidated on disk), + * then optionally issues discard 
commands to the newly free buckets, then puts + * them on the various freelists. + */  static inline bool can_inc_bucket_gen(struct bucket *b)  { -	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX && -		bucket_disk_gen(b) < BUCKET_DISK_GEN_MAX; +	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;  } -bool bch_bucket_add_unused(struct cache *ca, struct bucket *b) +bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)  { -	BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b)); +	BUG_ON(!ca->set->gc_mark_valid); -	if (fifo_used(&ca->free) > ca->watermark[WATERMARK_MOVINGGC] && -	    CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO) -		return false; - -	b->prio = 0; - -	if (can_inc_bucket_gen(b) && -	    fifo_push(&ca->unused, b - ca->buckets)) { -		atomic_inc(&b->pin); -		return true; -	} - -	return false; -} - -static bool can_invalidate_bucket(struct cache *ca, struct bucket *b) -{ -	return GC_MARK(b) == GC_MARK_RECLAIMABLE && +	return (!GC_MARK(b) || +		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&  		!atomic_read(&b->pin) &&  		can_inc_bucket_gen(b);  } -static void invalidate_one_bucket(struct cache *ca, struct bucket *b) +void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)  { +	lockdep_assert_held(&ca->set->bucket_lock); +	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE); + +	if (GC_SECTORS_USED(b)) +		trace_bcache_invalidate(ca, b - ca->buckets); +  	bch_inc_gen(ca, b);  	b->prio = INITIAL_PRIO;  	atomic_inc(&b->pin); +} + +static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) +{ +	__bch_invalidate_one_bucket(ca, b); +  	fifo_push(&ca->free_inc, b - ca->buckets);  } -#define bucket_prio(b)				\ -	(((unsigned) (b->prio - ca->set->min_prio)) * GC_SECTORS_USED(b)) +/* + * Determines what order we're going to reuse buckets, smallest bucket_prio() + * first: we also take into account the number of sectors of live data in that + * bucket, and in order for that multiply to make sense we have to scale bucket + * + * Thus, we scale the bucket priorities so that the bucket with the smallest + * prio is worth 1/8th of what INITIAL_PRIO is worth. 
+ */ + +#define bucket_prio(b)							\ +({									\ +	unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\ +									\ +	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\ +})  #define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))  #define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r)) @@ -246,20 +183,7 @@ static void invalidate_buckets_lru(struct cache *ca)  	ca->heap.used = 0;  	for_each_bucket(b, ca) { -		/* -		 * If we fill up the unused list, if we then return before -		 * adding anything to the free_inc list we'll skip writing -		 * prios/gens and just go back to allocating from the unused -		 * list: -		 */ -		if (fifo_full(&ca->unused)) -			return; - -		if (!can_invalidate_bucket(ca, b)) -			continue; - -		if (!GC_SECTORS_USED(b) && -		    bch_bucket_add_unused(ca, b)) +		if (!bch_can_invalidate_bucket(ca, b))  			continue;  		if (!heap_full(&ca->heap)) @@ -280,11 +204,11 @@ static void invalidate_buckets_lru(struct cache *ca)  			 * multiple times when it can't do anything  			 */  			ca->invalidate_needs_gc = 1; -			bch_queue_gc(ca->set); +			wake_up_gc(ca->set);  			return;  		} -		invalidate_one_bucket(ca, b); +		bch_invalidate_one_bucket(ca, b);  	}  } @@ -300,12 +224,12 @@ static void invalidate_buckets_fifo(struct cache *ca)  		b = ca->buckets + ca->fifo_last_bucket++; -		if (can_invalidate_bucket(ca, b)) -			invalidate_one_bucket(ca, b); +		if (bch_can_invalidate_bucket(ca, b)) +			bch_invalidate_one_bucket(ca, b);  		if (++checked >= ca->sb.nbuckets) {  			ca->invalidate_needs_gc = 1; -			bch_queue_gc(ca->set); +			wake_up_gc(ca->set);  			return;  		}  	} @@ -325,12 +249,12 @@ static void invalidate_buckets_random(struct cache *ca)  		b = ca->buckets + n; -		if (can_invalidate_bucket(ca, b)) -			invalidate_one_bucket(ca, b); +		if (bch_can_invalidate_bucket(ca, b)) +			bch_invalidate_one_bucket(ca, b);  		if (++checked >= ca->sb.nbuckets / 2) {  			ca->invalidate_needs_gc = 1; -			bch_queue_gc(ca->set); +			wake_up_gc(ca->set);  			return;  		}  	} @@ -338,8 +262,7 @@ static void invalidate_buckets_random(struct cache *ca)  static void invalidate_buckets(struct cache *ca)  { -	if (ca->invalidate_needs_gc) -		return; +	BUG_ON(ca->invalidate_needs_gc);  	switch (CACHE_REPLACEMENT(&ca->sb)) {  	case CACHE_REPLACEMENT_LRU: @@ -352,8 +275,6 @@ static void invalidate_buckets(struct cache *ca)  		invalidate_buckets_random(ca);  		break;  	} - -	trace_bcache_alloc_invalidate(ca);  }  #define allocator_wait(ca, cond)					\ @@ -374,6 +295,21 @@ do {									\  	__set_current_state(TASK_RUNNING);				\  } while (0) +static int bch_allocator_push(struct cache *ca, long bucket) +{ +	unsigned i; + +	/* Prios/gens are actually the most important reserve */ +	if (fifo_push(&ca->free[RESERVE_PRIO], bucket)) +		return true; + +	for (i = 0; i < RESERVE_NR; i++) +		if (fifo_push(&ca->free[i], bucket)) +			return true; + +	return false; +} +  static int bch_allocator_thread(void *arg)  {  	struct cache *ca = arg; @@ -386,28 +322,22 @@ static int bch_allocator_thread(void *arg)  		 * possibly issue discards to them, then we add the bucket to  		 * the free list:  		 */ -		while (1) { +		while (!fifo_empty(&ca->free_inc)) {  			long bucket; -			if ((!atomic_read(&ca->set->prio_blocked) || -			     !CACHE_SYNC(&ca->set->sb)) && -			    !fifo_empty(&ca->unused)) -				fifo_pop(&ca->unused, bucket); -			else if (!fifo_empty(&ca->free_inc)) -				fifo_pop(&ca->free_inc, bucket); -			else -				break; - -			allocator_wait(ca, (int) fifo_free(&ca->free) > -				    
   atomic_read(&ca->discards_in_flight)); +			fifo_pop(&ca->free_inc, bucket);  			if (ca->discard) { -				allocator_wait(ca, !list_empty(&ca->discards)); -				do_discard(ca, bucket); -			} else { -				fifo_push(&ca->free, bucket); -				closure_wake_up(&ca->set->bucket_wait); +				mutex_unlock(&ca->set->bucket_lock); +				blkdev_issue_discard(ca->bdev, +					bucket_to_sector(ca->set, bucket), +					ca->sb.block_size, GFP_KERNEL, 0); +				mutex_lock(&ca->set->bucket_lock);  			} + +			allocator_wait(ca, bch_allocator_push(ca, bucket)); +			wake_up(&ca->set->btree_cache_wait); +			wake_up(&ca->set->bucket_wait);  		}  		/* @@ -416,9 +346,9 @@ static int bch_allocator_thread(void *arg)  		 * them to the free_inc list:  		 */ +retry_invalidate:  		allocator_wait(ca, ca->set->gc_mark_valid && -			       (ca->need_save_prio > 64 || -				!ca->invalidate_needs_gc)); +			       !ca->invalidate_needs_gc);  		invalidate_buckets(ca);  		/* @@ -426,82 +356,111 @@ static int bch_allocator_thread(void *arg)  		 * new stuff to them:  		 */  		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked)); -		if (CACHE_SYNC(&ca->set->sb) && -		    (!fifo_empty(&ca->free_inc) || -		     ca->need_save_prio > 64)) +		if (CACHE_SYNC(&ca->set->sb)) { +			/* +			 * This could deadlock if an allocation with a btree +			 * node locked ever blocked - having the btree node +			 * locked would block garbage collection, but here we're +			 * waiting on garbage collection before we invalidate +			 * and free anything. +			 * +			 * But this should be safe since the btree code always +			 * uses btree_check_reserve() before allocating now, and +			 * if it fails it blocks without btree nodes locked. +			 */ +			if (!fifo_full(&ca->free_inc)) +				goto retry_invalidate; +  			bch_prio_write(ca); +		}  	}  } -long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl) +/* Allocation */ + +long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)  { -	long r = -1; -again: +	DEFINE_WAIT(w); +	struct bucket *b; +	long r; + +	/* fastpath */ +	if (fifo_pop(&ca->free[RESERVE_NONE], r) || +	    fifo_pop(&ca->free[reserve], r)) +		goto out; + +	if (!wait) { +		trace_bcache_alloc_fail(ca, reserve); +		return -1; +	} + +	do { +		prepare_to_wait(&ca->set->bucket_wait, &w, +				TASK_UNINTERRUPTIBLE); + +		mutex_unlock(&ca->set->bucket_lock); +		schedule(); +		mutex_lock(&ca->set->bucket_lock); +	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) && +		 !fifo_pop(&ca->free[reserve], r)); + +	finish_wait(&ca->set->bucket_wait, &w); +out:  	wake_up_process(ca->alloc_thread); -	if (fifo_used(&ca->free) > ca->watermark[watermark] && -	    fifo_pop(&ca->free, r)) { -		struct bucket *b = ca->buckets + r; -#ifdef CONFIG_BCACHE_EDEBUG +	trace_bcache_alloc(ca, reserve); + +	if (expensive_debug_checks(ca->set)) {  		size_t iter;  		long i; +		unsigned j;  		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)  			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r); -		fifo_for_each(i, &ca->free, iter) -			BUG_ON(i == r); +		for (j = 0; j < RESERVE_NR; j++) +			fifo_for_each(i, &ca->free[j], iter) +				BUG_ON(i == r);  		fifo_for_each(i, &ca->free_inc, iter)  			BUG_ON(i == r); -		fifo_for_each(i, &ca->unused, iter) -			BUG_ON(i == r); -#endif -		BUG_ON(atomic_read(&b->pin) != 1); - -		SET_GC_SECTORS_USED(b, ca->sb.bucket_size); - -		if (watermark <= WATERMARK_METADATA) { -			SET_GC_MARK(b, GC_MARK_METADATA); -			b->prio = BTREE_PRIO; -		} else { -			SET_GC_MARK(b, GC_MARK_RECLAIMABLE); -			b->prio = INITIAL_PRIO; -		} - -		return 
r;  	} -	trace_bcache_alloc_fail(ca); +	b = ca->buckets + r; -	if (cl) { -		closure_wait(&ca->set->bucket_wait, cl); +	BUG_ON(atomic_read(&b->pin) != 1); -		if (closure_blocking(cl)) { -			mutex_unlock(&ca->set->bucket_lock); -			closure_sync(cl); -			mutex_lock(&ca->set->bucket_lock); -			goto again; -		} +	SET_GC_SECTORS_USED(b, ca->sb.bucket_size); + +	if (reserve <= RESERVE_PRIO) { +		SET_GC_MARK(b, GC_MARK_METADATA); +		SET_GC_MOVE(b, 0); +		b->prio = BTREE_PRIO; +	} else { +		SET_GC_MARK(b, GC_MARK_RECLAIMABLE); +		SET_GC_MOVE(b, 0); +		b->prio = INITIAL_PRIO;  	} -	return -1; +	return r; +} + +void __bch_bucket_free(struct cache *ca, struct bucket *b) +{ +	SET_GC_MARK(b, 0); +	SET_GC_SECTORS_USED(b, 0);  }  void bch_bucket_free(struct cache_set *c, struct bkey *k)  {  	unsigned i; -	for (i = 0; i < KEY_PTRS(k); i++) { -		struct bucket *b = PTR_BUCKET(c, k, i); - -		SET_GC_MARK(b, GC_MARK_RECLAIMABLE); -		SET_GC_SECTORS_USED(b, 0); -		bch_bucket_add_unused(PTR_CACHE(c, k, i), b); -	} +	for (i = 0; i < KEY_PTRS(k); i++) +		__bch_bucket_free(PTR_CACHE(c, k, i), +				  PTR_BUCKET(c, k, i));  } -int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark, -			   struct bkey *k, int n, struct closure *cl) +int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, +			   struct bkey *k, int n, bool wait)  {  	int i; @@ -514,7 +473,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,  	for (i = 0; i < n; i++) {  		struct cache *ca = c->cache_by_alloc[i]; -		long b = bch_bucket_alloc(ca, watermark, cl); +		long b = bch_bucket_alloc(ca, reserve, wait);  		if (b == -1)  			goto err; @@ -529,75 +488,209 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,  	return 0;  err:  	bch_bucket_free(c, k); -	__bkey_put(c, k); +	bkey_put(c, k);  	return -1;  } -int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark, -			 struct bkey *k, int n, struct closure *cl) +int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, +			 struct bkey *k, int n, bool wait)  {  	int ret;  	mutex_lock(&c->bucket_lock); -	ret = __bch_bucket_alloc_set(c, watermark, k, n, cl); +	ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);  	mutex_unlock(&c->bucket_lock);  	return ret;  } -/* Init */ +/* Sector allocator */ -int bch_cache_allocator_start(struct cache *ca) +struct open_bucket { +	struct list_head	list; +	unsigned		last_write_point; +	unsigned		sectors_free; +	BKEY_PADDED(key); +}; + +/* + * We keep multiple buckets open for writes, and try to segregate different + * write streams for better cache utilization: first we look for a bucket where + * the last write to it was sequential with the current write, and failing that + * we look for a bucket that was last used by the same task. + * + * The ideas is if you've got multiple tasks pulling data into the cache at the + * same time, you'll get better cache utilization if you try to segregate their + * data and preserve locality. + * + * For example, say you've starting Firefox at the same time you're copying a + * bunch of files. Firefox will likely end up being fairly hot and stay in the + * cache awhile, but the data you copied might not be; if you wrote all that + * data to the same buckets it'd get invalidated at the same time. + * + * Both of those tasks will be doing fairly random IO so we can't rely on + * detecting sequential IO to segregate their data, but going off of the task + * should be a sane heuristic. 
+ */ +static struct open_bucket *pick_data_bucket(struct cache_set *c, +					    const struct bkey *search, +					    unsigned write_point, +					    struct bkey *alloc)  { -	struct task_struct *k = kthread_run(bch_allocator_thread, -					    ca, "bcache_allocator"); -	if (IS_ERR(k)) -		return PTR_ERR(k); +	struct open_bucket *ret, *ret_task = NULL; + +	list_for_each_entry_reverse(ret, &c->data_buckets, list) +		if (!bkey_cmp(&ret->key, search)) +			goto found; +		else if (ret->last_write_point == write_point) +			ret_task = ret; + +	ret = ret_task ?: list_first_entry(&c->data_buckets, +					   struct open_bucket, list); +found: +	if (!ret->sectors_free && KEY_PTRS(alloc)) { +		ret->sectors_free = c->sb.bucket_size; +		bkey_copy(&ret->key, alloc); +		bkey_init(alloc); +	} -	ca->alloc_thread = k; -	return 0; +	if (!ret->sectors_free) +		ret = NULL; + +	return ret;  } -void bch_cache_allocator_exit(struct cache *ca) +/* + * Allocates some space in the cache to write to, and k to point to the newly + * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the + * end of the newly allocated space). + * + * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many + * sectors were actually allocated. + * + * If s->writeback is true, will not fail. + */ +bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors, +		       unsigned write_point, unsigned write_prio, bool wait)  { -	struct discard *d; +	struct open_bucket *b; +	BKEY_PADDED(key) alloc; +	unsigned i; + +	/* +	 * We might have to allocate a new bucket, which we can't do with a +	 * spinlock held. So if we have to allocate, we drop the lock, allocate +	 * and then retry. KEY_PTRS() indicates whether alloc points to +	 * allocated bucket(s). +	 */ -	while (!list_empty(&ca->discards)) { -		d = list_first_entry(&ca->discards, struct discard, list); -		cancel_work_sync(&d->work); -		list_del(&d->list); -		kfree(d); +	bkey_init(&alloc.key); +	spin_lock(&c->data_bucket_lock); + +	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) { +		unsigned watermark = write_prio +			? RESERVE_MOVINGGC +			: RESERVE_NONE; + +		spin_unlock(&c->data_bucket_lock); + +		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait)) +			return false; + +		spin_lock(&c->data_bucket_lock);  	} -} -int bch_cache_allocator_init(struct cache *ca) -{ -	unsigned i; +	/* +	 * If we had to allocate, we might race and not need to allocate the +	 * second time we call find_data_bucket(). 
If we allocated a bucket but +	 * didn't use it, drop the refcount bch_bucket_alloc_set() took: +	 */ +	if (KEY_PTRS(&alloc.key)) +		bkey_put(c, &alloc.key); + +	for (i = 0; i < KEY_PTRS(&b->key); i++) +		EBUG_ON(ptr_stale(c, &b->key, i)); + +	/* Set up the pointer to the space we're allocating: */ + +	for (i = 0; i < KEY_PTRS(&b->key); i++) +		k->ptr[i] = b->key.ptr[i]; + +	sectors = min(sectors, b->sectors_free); + +	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors); +	SET_KEY_SIZE(k, sectors); +	SET_KEY_PTRS(k, KEY_PTRS(&b->key)); + +	/* +	 * Move b to the end of the lru, and keep track of what this bucket was +	 * last used for: +	 */ +	list_move_tail(&b->list, &c->data_buckets); +	bkey_copy_key(&b->key, k); +	b->last_write_point = write_point; + +	b->sectors_free	-= sectors; + +	for (i = 0; i < KEY_PTRS(&b->key); i++) { +		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors); + +		atomic_long_add(sectors, +				&PTR_CACHE(c, &b->key, i)->sectors_written); +	} + +	if (b->sectors_free < c->sb.block_size) +		b->sectors_free = 0;  	/* -	 * Reserve: -	 * Prio/gen writes first -	 * Then 8 for btree allocations -	 * Then half for the moving garbage collector +	 * k takes refcounts on the buckets it points to until it's inserted +	 * into the btree, but if we're done with this bucket we just transfer +	 * get_data_bucket()'s refcount.  	 */ +	if (b->sectors_free) +		for (i = 0; i < KEY_PTRS(&b->key); i++) +			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin); -	ca->watermark[WATERMARK_PRIO] = 0; +	spin_unlock(&c->data_bucket_lock); +	return true; +} -	ca->watermark[WATERMARK_METADATA] = prio_buckets(ca); +/* Init */ -	ca->watermark[WATERMARK_MOVINGGC] = 8 + -		ca->watermark[WATERMARK_METADATA]; +void bch_open_buckets_free(struct cache_set *c) +{ +	struct open_bucket *b; -	ca->watermark[WATERMARK_NONE] = ca->free.size / 2 + -		ca->watermark[WATERMARK_MOVINGGC]; +	while (!list_empty(&c->data_buckets)) { +		b = list_first_entry(&c->data_buckets, +				     struct open_bucket, list); +		list_del(&b->list); +		kfree(b); +	} +} -	for (i = 0; i < MAX_IN_FLIGHT_DISCARDS; i++) { -		struct discard *d = kzalloc(sizeof(*d), GFP_KERNEL); -		if (!d) +int bch_open_buckets_alloc(struct cache_set *c) +{ +	int i; + +	spin_lock_init(&c->data_bucket_lock); + +	for (i = 0; i < 6; i++) { +		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL); +		if (!b)  			return -ENOMEM; -		d->ca = ca; -		INIT_WORK(&d->work, discard_finish); -		list_add(&d->list, &ca->discards); +		list_add(&b->list, &c->data_buckets);  	}  	return 0;  } + +int bch_cache_allocator_start(struct cache *ca) +{ +	struct task_struct *k = kthread_run(bch_allocator_thread, +					    ca, "bcache_allocator"); +	if (IS_ERR(k)) +		return PTR_ERR(k); + +	ca->alloc_thread = k; +	return 0; +} diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index b39f6f0b45f..d2ebcf32309 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -177,6 +177,7 @@  #define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__ +#include <linux/bcache.h>  #include <linux/bio.h>  #include <linux/kobject.h>  #include <linux/list.h> @@ -186,6 +187,7 @@  #include <linux/types.h>  #include <linux/workqueue.h> +#include "bset.h"  #include "util.h"  #include "closure.h" @@ -193,10 +195,8 @@ struct bucket {  	atomic_t	pin;  	uint16_t	prio;  	uint8_t		gen; -	uint8_t		disk_gen;  	uint8_t		last_gc; /* Most out of date gen in the btree */ -	uint8_t		gc_gen; -	uint16_t	gc_mark; +	uint16_t	gc_mark; /* Bitfield used by GC. 
See below for field */  };  /* @@ -205,172 +205,13 @@ struct bucket {   */  BITMASK(GC_MARK,	 struct bucket, gc_mark, 0, 2); -#define GC_MARK_RECLAIMABLE	0 -#define GC_MARK_DIRTY		1 -#define GC_MARK_METADATA	2 -BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14); - -struct bkey { -	uint64_t	high; -	uint64_t	low; -	uint64_t	ptr[]; -}; - -/* Enough for a key with 6 pointers */ -#define BKEY_PAD		8 - -#define BKEY_PADDED(key)					\ -	union { struct bkey key; uint64_t key ## _pad[BKEY_PAD]; } - -/* Version 0: Cache device - * Version 1: Backing device - * Version 2: Seed pointer into btree node checksum - * Version 3: Cache device with new UUID format - * Version 4: Backing device with data offset - */ -#define BCACHE_SB_VERSION_CDEV			0 -#define BCACHE_SB_VERSION_BDEV			1 -#define BCACHE_SB_VERSION_CDEV_WITH_UUID	3 -#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET	4 -#define BCACHE_SB_MAX_VERSION			4 - -#define SB_SECTOR		8 -#define SB_SIZE			4096 -#define SB_LABEL_SIZE		32 -#define SB_JOURNAL_BUCKETS	256U -/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */ -#define MAX_CACHES_PER_SET	8 - -#define BDEV_DATA_START_DEFAULT	16	/* sectors */ - -struct cache_sb { -	uint64_t		csum; -	uint64_t		offset;	/* sector where this sb was written */ -	uint64_t		version; - -	uint8_t			magic[16]; - -	uint8_t			uuid[16]; -	union { -		uint8_t		set_uuid[16]; -		uint64_t	set_magic; -	}; -	uint8_t			label[SB_LABEL_SIZE]; - -	uint64_t		flags; -	uint64_t		seq; -	uint64_t		pad[8]; - -	union { -	struct { -		/* Cache devices */ -		uint64_t	nbuckets;	/* device size */ - -		uint16_t	block_size;	/* sectors */ -		uint16_t	bucket_size;	/* sectors */ - -		uint16_t	nr_in_set; -		uint16_t	nr_this_dev; -	}; -	struct { -		/* Backing devices */ -		uint64_t	data_offset; - -		/* -		 * block_size from the cache device section is still used by -		 * backing devices, so don't add anything here until we fix -		 * things to not need it for backing devices anymore -		 */ -	}; -	}; - -	uint32_t		last_mount;	/* time_t */ - -	uint16_t		first_bucket; -	union { -		uint16_t	njournal_buckets; -		uint16_t	keys; -	}; -	uint64_t		d[SB_JOURNAL_BUCKETS];	/* journal buckets */ -}; - -BITMASK(CACHE_SYNC,		struct cache_sb, flags, 0, 1); -BITMASK(CACHE_DISCARD,		struct cache_sb, flags, 1, 1); -BITMASK(CACHE_REPLACEMENT,	struct cache_sb, flags, 2, 3); -#define CACHE_REPLACEMENT_LRU	0U -#define CACHE_REPLACEMENT_FIFO	1U -#define CACHE_REPLACEMENT_RANDOM 2U - -BITMASK(BDEV_CACHE_MODE,	struct cache_sb, flags, 0, 4); -#define CACHE_MODE_WRITETHROUGH	0U -#define CACHE_MODE_WRITEBACK	1U -#define CACHE_MODE_WRITEAROUND	2U -#define CACHE_MODE_NONE		3U -BITMASK(BDEV_STATE,		struct cache_sb, flags, 61, 2); -#define BDEV_STATE_NONE		0U -#define BDEV_STATE_CLEAN	1U -#define BDEV_STATE_DIRTY	2U -#define BDEV_STATE_STALE	3U - -/* Version 1: Seed pointer into btree node checksum - */ -#define BCACHE_BSET_VERSION	1 - -/* - * This is the on disk format for btree nodes - a btree node on disk is a list - * of these; within each set the keys are sorted - */ -struct bset { -	uint64_t		csum; -	uint64_t		magic; -	uint64_t		seq; -	uint32_t		version; -	uint32_t		keys; - -	union { -		struct bkey	start[0]; -		uint64_t	d[0]; -	}; -}; - -/* - * On disk format for priorities and gens - see super.c near prio_write() for - * more. 
- */ -struct prio_set { -	uint64_t		csum; -	uint64_t		magic; -	uint64_t		seq; -	uint32_t		version; -	uint32_t		pad; - -	uint64_t		next_bucket; - -	struct bucket_disk { -		uint16_t	prio; -		uint8_t		gen; -	} __attribute((packed)) data[]; -}; - -struct uuid_entry { -	union { -		struct { -			uint8_t		uuid[16]; -			uint8_t		label[32]; -			uint32_t	first_reg; -			uint32_t	last_reg; -			uint32_t	invalidated; - -			uint32_t	flags; -			/* Size of flash only volumes */ -			uint64_t	sectors; -		}; - -		uint8_t	pad[128]; -	}; -}; - -BITMASK(UUID_FLASH_ONLY,	struct uuid_entry, flags, 0, 1); +#define GC_MARK_RECLAIMABLE	1 +#define GC_MARK_DIRTY		2 +#define GC_MARK_METADATA	3 +#define GC_SECTORS_USED_SIZE	13 +#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE)) +BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE); +BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);  #include "journal.h"  #include "stats.h" @@ -384,8 +225,6 @@ struct keybuf_key {  	void			*private;  }; -typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *); -  struct keybuf {  	struct bkey		last_scanned;  	spinlock_t		lock; @@ -400,7 +239,7 @@ struct keybuf {  	struct rb_root		keys; -#define KEYBUF_NR		100 +#define KEYBUF_NR		500  	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);  }; @@ -429,21 +268,19 @@ struct bcache_device {  	struct gendisk		*disk; -	/* If nonzero, we're closing */ -	atomic_t		closing; - -	/* If nonzero, we're detaching/unregistering from cache set */ -	atomic_t		detaching; -	int			flush_done; +	unsigned long		flags; +#define BCACHE_DEV_CLOSING	0 +#define BCACHE_DEV_DETACHING	1 +#define BCACHE_DEV_UNLINK_DONE	2 -	uint64_t		nr_stripes; -	unsigned		stripe_size_bits; +	unsigned		nr_stripes; +	unsigned		stripe_size;  	atomic_t		*stripe_sectors_dirty; +	unsigned long		*full_dirty_stripes;  	unsigned long		sectors_dirty_last;  	long			sectors_dirty_derivative; -	mempool_t		*unaligned_bvec;  	struct bio_set		*bio_split;  	unsigned		data_csum:1; @@ -473,7 +310,8 @@ struct cached_dev {  	struct cache_sb		sb;  	struct bio		sb_bio;  	struct bio_vec		sb_bv[1]; -	struct closure_with_waitlist sb_write; +	struct closure		sb_write; +	struct semaphore	sb_write_mutex;  	/* Refcount on the cache set. Always nonzero when we're caching. 
*/  	atomic_t		count; @@ -498,7 +336,7 @@ struct cached_dev {  	 */  	atomic_t		has_dirty; -	struct ratelimit	writeback_rate; +	struct bch_ratelimit	writeback_rate;  	struct delayed_work	writeback_rate_update;  	/* @@ -507,10 +345,9 @@ struct cached_dev {  	 */  	sector_t		last_read; -	/* Number of writeback bios in flight */ -	atomic_t		in_flight; -	struct closure_with_timer writeback; -	struct closure_waitlist	writeback_wait; +	/* Limit number of writeback bios in flight */ +	struct semaphore	in_flight; +	struct task_struct	*writeback_thread;  	struct keybuf		writeback_keys; @@ -528,8 +365,8 @@ struct cached_dev {  	unsigned		sequential_cutoff;  	unsigned		readahead; -	unsigned		sequential_merge:1;  	unsigned		verify:1; +	unsigned		bypass_torture_test:1;  	unsigned		partial_stripes_expensive:1;  	unsigned		writeback_metadata:1; @@ -537,22 +374,22 @@ struct cached_dev {  	unsigned char		writeback_percent;  	unsigned		writeback_delay; -	int			writeback_rate_change; -	int64_t			writeback_rate_derivative;  	uint64_t		writeback_rate_target; +	int64_t			writeback_rate_proportional; +	int64_t			writeback_rate_derivative; +	int64_t			writeback_rate_change;  	unsigned		writeback_rate_update_seconds;  	unsigned		writeback_rate_d_term;  	unsigned		writeback_rate_p_term_inverse; -	unsigned		writeback_rate_d_smooth;  }; -enum alloc_watermarks { -	WATERMARK_PRIO, -	WATERMARK_METADATA, -	WATERMARK_MOVINGGC, -	WATERMARK_NONE, -	WATERMARK_MAX +enum alloc_reserve { +	RESERVE_BTREE, +	RESERVE_PRIO, +	RESERVE_MOVINGGC, +	RESERVE_NONE, +	RESERVE_NR,  };  struct cache { @@ -564,8 +401,6 @@ struct cache {  	struct kobject		kobj;  	struct block_device	*bdev; -	unsigned		watermark[WATERMARK_MAX]; -  	struct task_struct	*alloc_thread;  	struct closure		prio; @@ -589,14 +424,9 @@ struct cache {  	 * their new gen to disk. After prio_write() finishes writing the new  	 * gens/prios, they'll be moved to the free list (and possibly discarded  	 * in the process) -	 * -	 * unused: GC found nothing pointing into these buckets (possibly -	 * because all the data they contained was overwritten), so we only -	 * need to discard them before they can be moved to the free list.  	 */ -	DECLARE_FIFO(long, free); +	DECLARE_FIFO(long, free)[RESERVE_NR];  	DECLARE_FIFO(long, free_inc); -	DECLARE_FIFO(long, unused);  	size_t			fifo_last_bucket; @@ -606,13 +436,6 @@ struct cache {  	DECLARE_HEAP(struct bucket *, heap);  	/* -	 * max(gen - disk_gen) for all buckets. When it gets too big we have to -	 * call prio_write() to keep gens from wrapping. -	 */ -	uint8_t			need_save_prio; -	unsigned		gc_move_threshold; - -	/*  	 * If nonzero, we know we aren't going to find any buckets to invalidate  	 * until a gc finishes - otherwise we could pointlessly burn a ton of  	 * cpu @@ -621,15 +444,6 @@ struct cache {  	bool			discard; /* Get rid of? */ -	/* -	 * We preallocate structs for issuing discards to buckets, and keep them -	 * on this list when they're not in use; do_discard() issues discards -	 * whenever there's work to do and is called by free_some_buckets() and -	 * when a discard finishes. 
-	 */ -	atomic_t		discards_in_flight; -	struct list_head	discards; -  	struct journal_device	journal;  	/* The rest of this all shows up in sysfs */ @@ -650,7 +464,6 @@ struct gc_stat {  	size_t			nkeys;  	uint64_t		data;	/* sectors */ -	uint64_t		dirty;	/* sectors */  	unsigned		in_use; /* percent */  }; @@ -690,7 +503,8 @@ struct cache_set {  	uint64_t		cached_dev_sectors;  	struct closure		caching; -	struct closure_with_waitlist sb_write; +	struct closure		sb_write; +	struct semaphore	sb_write_mutex;  	mempool_t		*search;  	mempool_t		*bio_meta; @@ -735,19 +549,16 @@ struct cache_set {  	struct list_head	btree_cache_freed;  	/* Number of elements in btree_cache + btree_cache_freeable lists */ -	unsigned		bucket_cache_used; +	unsigned		btree_cache_used;  	/*  	 * If we need to allocate memory for a new btree node and that  	 * allocation fails, we can cannibalize another node in the btree cache -	 * to satisfy the allocation. However, only one thread can be doing this -	 * at a time, for obvious reasons - try_harder and try_wait are -	 * basically a lock for this that we can wait on asynchronously. The -	 * btree_root() macro releases the lock when it returns. +	 * to satisfy the allocation - lock to guarantee only one thread does +	 * this at a time:  	 */ -	struct closure		*try_harder; -	struct closure_waitlist	try_wait; -	uint64_t		try_harder_start; +	wait_queue_head_t	btree_cache_wait; +	struct task_struct	*btree_cache_alloc_lock;  	/*  	 * When we free a btree node, we increment the gen of the bucket the @@ -760,7 +571,7 @@ struct cache_set {  	 * written.  	 */  	atomic_t		prio_blocked; -	struct closure_waitlist	bucket_wait; +	wait_queue_head_t	bucket_wait;  	/*  	 * For any bio we don't skip we subtract the number of sectors from @@ -776,14 +587,14 @@ struct cache_set {  	uint16_t		min_prio;  	/* -	 * max(gen - gc_gen) for all buckets. When it gets too big we have to gc +	 * max(gen - last_gc) for all buckets. When it gets too big we have to gc  	 * to keep gens from wrapping around.  	 
*/  	uint8_t			need_gc;  	struct gc_stat		gc_stats;  	size_t			nbuckets; -	struct closure_with_waitlist gc; +	struct task_struct	*gc_thread;  	/* Where in the btree gc currently is */  	struct bkey		gc_done; @@ -796,23 +607,26 @@ struct cache_set {  	/* Counts how many sectors bio_insert has added to the cache */  	atomic_t		sectors_to_gc; -	struct closure		moving_gc; -	struct closure_waitlist	moving_gc_wait; +	wait_queue_head_t	moving_gc_wait;  	struct keybuf		moving_gc_keys;  	/* Number of moving GC bios in flight */ -	atomic_t		in_flight; +	struct semaphore	moving_in_flight; + +	struct workqueue_struct	*moving_gc_wq;  	struct btree		*root;  #ifdef CONFIG_BCACHE_DEBUG  	struct btree		*verify_data; +	struct bset		*verify_ondisk;  	struct mutex		verify_lock;  #endif  	unsigned		nr_uuids;  	struct uuid_entry	*uuids;  	BKEY_PADDED(uuid_bucket); -	struct closure_with_waitlist uuid_write; +	struct closure		uuid_write; +	struct semaphore	uuid_write_mutex;  	/*  	 * A btree node on disk could have too many bsets for an iterator to fit @@ -820,13 +634,7 @@ struct cache_set {  	 */  	mempool_t		*fill_iter; -	/* -	 * btree_sort() is a merge sort and requires temporary space - single -	 * element mempool -	 */ -	struct mutex		sort_lock; -	struct bset		*sort; -	unsigned		sort_crit_factor; +	struct bset_sort_state	sort;  	/* List of buckets we're currently writing data to */  	struct list_head	data_buckets; @@ -842,20 +650,23 @@ struct cache_set {  	unsigned		congested_read_threshold_us;  	unsigned		congested_write_threshold_us; -	spinlock_t		sort_time_lock; -	struct time_stats	sort_time;  	struct time_stats	btree_gc_time;  	struct time_stats	btree_split_time; -	spinlock_t		btree_read_time_lock;  	struct time_stats	btree_read_time; -	struct time_stats	try_harder_time;  	atomic_long_t		cache_read_races;  	atomic_long_t		writeback_keys_done;  	atomic_long_t		writeback_keys_failed; + +	enum			{ +		ON_ERROR_UNREGISTER, +		ON_ERROR_PANIC, +	}			on_error;  	unsigned		error_limit;  	unsigned		error_decay; +  	unsigned short		journal_delay_ms; +	bool			expensive_debug_checks;  	unsigned		verify:1;  	unsigned		key_merging_disabled:1;  	unsigned		gc_always_rewrite:1; @@ -866,21 +677,6 @@ struct cache_set {  	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];  }; -static inline bool key_merging_disabled(struct cache_set *c) -{ -#ifdef CONFIG_BCACHE_DEBUG -	return c->key_merging_disabled; -#else -	return 0; -#endif -} - -static inline bool SB_IS_BDEV(const struct cache_sb *sb) -{ -	return sb->version == BCACHE_SB_VERSION_BDEV -		|| sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET; -} -  struct bbio {  	unsigned		submit_time_us;  	union { @@ -894,13 +690,8 @@ struct bbio {  	struct bio		bio;  }; -static inline unsigned local_clock_us(void) -{ -	return local_clock() >> 10; -} -  #define BTREE_PRIO		USHRT_MAX -#define INITIAL_PRIO		32768 +#define INITIAL_PRIO		32768U  #define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)  #define btree_blocks(b)							\ @@ -913,80 +704,12 @@ static inline unsigned local_clock_us(void)  #define bucket_bytes(c)		((c)->sb.bucket_size << 9)  #define block_bytes(c)		((c)->sb.block_size << 9) -#define __set_bytes(i, k)	(sizeof(*(i)) + (k) * sizeof(uint64_t)) -#define set_bytes(i)		__set_bytes(i, i->keys) - -#define __set_blocks(i, k, c)	DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c)) -#define set_blocks(i, c)	__set_blocks(i, (i)->keys, c) - -#define node(i, j)		((struct bkey *) ((i)->d + (j))) -#define end(i)			node(i, (i)->keys) - -#define index(i, b)							\ -	((size_t) (((void *) 
i - (void *) (b)->sets[0].data) /		\ -		   block_bytes(b->c))) - -#define btree_data_space(b)	(PAGE_SIZE << (b)->page_order) -  #define prios_per_bucket(c)				\  	((bucket_bytes(c) - sizeof(struct prio_set)) /	\  	 sizeof(struct bucket_disk))  #define prio_buckets(c)					\  	DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c)) -#define JSET_MAGIC		0x245235c1a3625032ULL -#define PSET_MAGIC		0x6750e15f87337f91ULL -#define BSET_MAGIC		0x90135c78b99e07f5ULL - -#define jset_magic(c)		((c)->sb.set_magic ^ JSET_MAGIC) -#define pset_magic(c)		((c)->sb.set_magic ^ PSET_MAGIC) -#define bset_magic(c)		((c)->sb.set_magic ^ BSET_MAGIC) - -/* Bkey fields: all units are in sectors */ - -#define KEY_FIELD(name, field, offset, size)				\ -	BITMASK(name, struct bkey, field, offset, size) - -#define PTR_FIELD(name, offset, size)					\ -	static inline uint64_t name(const struct bkey *k, unsigned i)	\ -	{ return (k->ptr[i] >> offset) & ~(((uint64_t) ~0) << size); }	\ -									\ -	static inline void SET_##name(struct bkey *k, unsigned i, uint64_t v)\ -	{								\ -		k->ptr[i] &= ~(~((uint64_t) ~0 << size) << offset);	\ -		k->ptr[i] |= v << offset;				\ -	} - -KEY_FIELD(KEY_PTRS,	high, 60, 3) -KEY_FIELD(HEADER_SIZE,	high, 58, 2) -KEY_FIELD(KEY_CSUM,	high, 56, 2) -KEY_FIELD(KEY_PINNED,	high, 55, 1) -KEY_FIELD(KEY_DIRTY,	high, 36, 1) - -KEY_FIELD(KEY_SIZE,	high, 20, 16) -KEY_FIELD(KEY_INODE,	high, 0,  20) - -/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */ - -static inline uint64_t KEY_OFFSET(const struct bkey *k) -{ -	return k->low; -} - -static inline void SET_KEY_OFFSET(struct bkey *k, uint64_t v) -{ -	k->low = v; -} - -PTR_FIELD(PTR_DEV,		51, 12) -PTR_FIELD(PTR_OFFSET,		8,  43) -PTR_FIELD(PTR_GEN,		0,  8) - -#define PTR_CHECK_DEV		((1 << 12) - 1) - -#define PTR(gen, offset, dev)						\ -	((((uint64_t) dev) << 51) | ((uint64_t) offset) << 8 | gen) -  static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)  {  	return s >> c->bucket_bits; @@ -1023,28 +746,25 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,  	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);  } -/* Btree key macros */ +static inline uint8_t gen_after(uint8_t a, uint8_t b) +{ +	uint8_t r = a - b; +	return r > 128U ? 0 : r; +} -/* - * The high bit being set is a relic from when we used it to do binary - * searches - it told you where a key started. It's not used anymore, - * and can probably be safely dropped. 
- */ -#define KEY(dev, sector, len)						\ -((struct bkey) {							\ -	.high = (1ULL << 63) | ((uint64_t) (len) << 20) | (dev),	\ -	.low = (sector)							\ -}) +static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k, +				unsigned i) +{ +	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i)); +} -static inline void bkey_init(struct bkey *k) +static inline bool ptr_available(struct cache_set *c, const struct bkey *k, +				 unsigned i)  { -	*k = KEY(0, 0, 0); +	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);  } -#define KEY_START(k)		(KEY_OFFSET(k) - KEY_SIZE(k)) -#define START_KEY(k)		KEY(KEY_INODE(k), KEY_START(k), 0) -#define MAX_KEY			KEY(~(~0 << 20), ((uint64_t) ~0) >> 1, 0) -#define ZERO_KEY		KEY(0, 0, 0) +/* Btree key macros */  /*   * This is used for various on disk data structures - cache_sb, prio_set, bset, @@ -1052,7 +772,8 @@ static inline void bkey_init(struct bkey *k)   */  #define csum_set(i)							\  	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\ -	      ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t))) +		  ((void *) bset_bkey_last(i)) -			\ +		  (((void *) (i)) + sizeof(uint64_t)))  /* Error handling macros */ @@ -1095,14 +816,6 @@ do {									\  	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\  	     b < (ca)->buckets + (ca)->sb.nbuckets; b++) -static inline void __bkey_put(struct cache_set *c, struct bkey *k) -{ -	unsigned i; - -	for (i = 0; i < KEY_PTRS(k); i++) -		atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin); -} -  static inline void cached_dev_put(struct cached_dev *dc)  {  	if (atomic_dec_and_test(&dc->count)) @@ -1115,16 +828,13 @@ static inline bool cached_dev_get(struct cached_dev *dc)  		return false;  	/* Paired with the mb in cached_dev_attach */ -	smp_mb__after_atomic_inc(); +	smp_mb__after_atomic();  	return true;  }  /*   * bucket_gc_gen() returns the difference between the bucket's current gen and   * the oldest gen of any pointer into that bucket in the btree (last_gc). - * - * bucket_disk_gen() returns the difference between the current gen and the gen - * on disk; they're both used to make sure gens don't wrap around.   
*/  static inline uint8_t bucket_gc_gen(struct bucket *b) @@ -1132,13 +842,7 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)  	return b->gen - b->last_gc;  } -static inline uint8_t bucket_disk_gen(struct bucket *b) -{ -	return b->gen - b->disk_gen; -} -  #define BUCKET_GC_GEN_MAX	96U -#define BUCKET_DISK_GEN_MAX	64U  #define kobj_attribute_write(n, fn)					\  	static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn) @@ -1165,22 +869,26 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);  void bch_bbio_free(struct bio *, struct cache_set *);  struct bio *bch_bbio_alloc(struct cache_set *); -struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);  void bch_generic_make_request(struct bio *, struct bio_split_pool *);  void __bch_submit_bbio(struct bio *, struct cache_set *);  void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);  uint8_t bch_inc_gen(struct cache *, struct bucket *);  void bch_rescale_priorities(struct cache_set *, int); -bool bch_bucket_add_unused(struct cache *, struct bucket *); -long bch_bucket_alloc(struct cache *, unsigned, struct closure *); +bool bch_can_invalidate_bucket(struct cache *, struct bucket *); +void __bch_invalidate_one_bucket(struct cache *, struct bucket *); + +void __bch_bucket_free(struct cache *, struct bucket *);  void bch_bucket_free(struct cache_set *, struct bkey *); +long bch_bucket_alloc(struct cache *, unsigned, bool);  int __bch_bucket_alloc_set(struct cache_set *, unsigned, -			   struct bkey *, int, struct closure *); +			   struct bkey *, int, bool);  int bch_bucket_alloc_set(struct cache_set *, unsigned, -			 struct bkey *, int, struct closure *); +			 struct bkey *, int, bool); +bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned, +		       unsigned, unsigned, bool);  __printf(2, 3)  bool bch_cache_set_error(struct cache_set *, const char *, ...); @@ -1188,7 +896,7 @@ bool bch_cache_set_error(struct cache_set *, const char *, ...);  void bch_prio_write(struct cache *);  void bch_write_bdev_super(struct cached_dev *, struct closure *); -extern struct workqueue_struct *bcache_wq, *bch_gc_wq; +extern struct workqueue_struct *bcache_wq;  extern const char * const bch_cache_modes[];  extern struct mutex bch_register_lock;  extern struct list_head bch_cache_sets; @@ -1221,18 +929,14 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *);  void bch_btree_cache_free(struct cache_set *);  int bch_btree_cache_alloc(struct cache_set *);  void bch_moving_init_cache_set(struct cache_set *); +int bch_open_buckets_alloc(struct cache_set *); +void bch_open_buckets_free(struct cache_set *);  int bch_cache_allocator_start(struct cache *ca); -void bch_cache_allocator_exit(struct cache *ca); -int bch_cache_allocator_init(struct cache *ca);  void bch_debug_exit(void);  int bch_debug_init(struct kobject *); -void bch_writeback_exit(void); -int bch_writeback_init(void);  void bch_request_exit(void);  int bch_request_init(void); -void bch_btree_exit(void); -int bch_btree_init(void);  #endif /* _BCACHE_H */ diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 8010eed06a5..54541641530 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -5,177 +5,174 @@   * Copyright 2012 Google, Inc.   
*/ -#include "bcache.h" -#include "btree.h" -#include "debug.h" +#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__ +#include "util.h" +#include "bset.h" + +#include <linux/console.h>  #include <linux/random.h>  #include <linux/prefetch.h> -/* Keylists */ +#ifdef CONFIG_BCACHE_DEBUG -void bch_keylist_copy(struct keylist *dest, struct keylist *src) +void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)  { -	*dest = *src; +	struct bkey *k, *next; + +	for (k = i->start; k < bset_bkey_last(i); k = next) { +		next = bkey_next(k); + +		printk(KERN_ERR "block %u key %u/%u: ", set, +		       (unsigned) ((u64 *) k - i->d), i->keys); -	if (src->list == src->d) { -		size_t n = (uint64_t *) src->top - src->d; -		dest->top = (struct bkey *) &dest->d[n]; -		dest->list = dest->d; +		if (b->ops->key_dump) +			b->ops->key_dump(b, k); +		else +			printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k)); + +		if (next < bset_bkey_last(i) && +		    bkey_cmp(k, b->ops->is_extents ? +			     &START_KEY(next) : next) > 0) +			printk(KERN_ERR "Key skipped backwards\n");  	}  } -int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c) +void bch_dump_bucket(struct btree_keys *b)  { -	unsigned oldsize = (uint64_t *) l->top - l->list; -	unsigned newsize = oldsize + 2 + nptrs; -	uint64_t *new; - -	/* The journalling code doesn't handle the case where the keys to insert -	 * is bigger than an empty write: If we just return -ENOMEM here, -	 * bio_insert() and bio_invalidate() will insert the keys created so far -	 * and finish the rest when the keylist is empty. -	 */ -	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset)) -		return -ENOMEM; - -	newsize = roundup_pow_of_two(newsize); +	unsigned i; -	if (newsize <= KEYLIST_INLINE || -	    roundup_pow_of_two(oldsize) == newsize) -		return 0; +	console_lock(); +	for (i = 0; i <= b->nsets; i++) +		bch_dump_bset(b, b->set[i].data, +			      bset_sector_offset(b, b->set[i].data)); +	console_unlock(); +} -	new = krealloc(l->list == l->d ? NULL : l->list, -		       sizeof(uint64_t) * newsize, GFP_NOIO); +int __bch_count_data(struct btree_keys *b) +{ +	unsigned ret = 0; +	struct btree_iter iter; +	struct bkey *k; -	if (!new) -		return -ENOMEM; +	if (b->ops->is_extents) +		for_each_key(b, k, &iter) +			ret += KEY_SIZE(k); +	return ret; +} -	if (l->list == l->d) -		memcpy(new, l->list, sizeof(uint64_t) * KEYLIST_INLINE); +void __bch_check_keys(struct btree_keys *b, const char *fmt, ...) 
+{ +	va_list args; +	struct bkey *k, *p = NULL; +	struct btree_iter iter; +	const char *err; -	l->list = new; -	l->top = (struct bkey *) (&l->list[oldsize]); +	for_each_key(b, k, &iter) { +		if (b->ops->is_extents) { +			err = "Keys out of order"; +			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) +				goto bug; -	return 0; -} +			if (bch_ptr_invalid(b, k)) +				continue; -struct bkey *bch_keylist_pop(struct keylist *l) -{ -	struct bkey *k = l->bottom; +			err =  "Overlapping keys"; +			if (p && bkey_cmp(p, &START_KEY(k)) > 0) +				goto bug; +		} else { +			if (bch_ptr_bad(b, k)) +				continue; -	if (k == l->top) -		return NULL; +			err = "Duplicate keys"; +			if (p && !bkey_cmp(p, k)) +				goto bug; +		} +		p = k; +	} +#if 0 +	err = "Key larger than btree node key"; +	if (p && bkey_cmp(p, &b->key) > 0) +		goto bug; +#endif +	return; +bug: +	bch_dump_bucket(b); -	while (bkey_next(k) != l->top) -		k = bkey_next(k); +	va_start(args, fmt); +	vprintk(fmt, args); +	va_end(args); -	return l->top = k; +	panic("bch_check_keys error:  %s:\n", err);  } -/* Pointer validation */ - -bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k) +static void bch_btree_iter_next_check(struct btree_iter *iter)  { -	unsigned i; -	char buf[80]; +	struct bkey *k = iter->data->k, *next = bkey_next(k); -	if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))) -		goto bad; +	if (next < iter->data->end && +	    bkey_cmp(k, iter->b->ops->is_extents ? +		     &START_KEY(next) : next) > 0) { +		bch_dump_bucket(iter->b); +		panic("Key skipped backwards\n"); +	} +} -	if (!level && KEY_SIZE(k) > KEY_OFFSET(k)) -		goto bad; +#else -	if (!KEY_SIZE(k)) -		return true; +static inline void bch_btree_iter_next_check(struct btree_iter *iter) {} -	for (i = 0; i < KEY_PTRS(k); i++) -		if (ptr_available(c, k, i)) { -			struct cache *ca = PTR_CACHE(c, k, i); -			size_t bucket = PTR_BUCKET_NR(c, k, i); -			size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); - -			if (KEY_SIZE(k) + r > c->sb.bucket_size || -			    bucket <  ca->sb.first_bucket || -			    bucket >= ca->sb.nbuckets) -				goto bad; -		} +#endif -	return false; -bad: -	bch_bkey_to_text(buf, sizeof(buf), k); -	cache_bug(c, "spotted bad key %s: %s", buf, bch_ptr_status(c, k)); -	return true; -} +/* Keylists */ -bool bch_ptr_bad(struct btree *b, const struct bkey *k) +int __bch_keylist_realloc(struct keylist *l, unsigned u64s)  { -	struct bucket *g; -	unsigned i, stale; +	size_t oldsize = bch_keylist_nkeys(l); +	size_t newsize = oldsize + u64s; +	uint64_t *old_keys = l->keys_p == l->inline_keys ? 
NULL : l->keys_p; +	uint64_t *new_keys; -	if (!bkey_cmp(k, &ZERO_KEY) || -	    !KEY_PTRS(k) || -	    bch_ptr_invalid(b, k)) -		return true; +	newsize = roundup_pow_of_two(newsize); -	if (KEY_PTRS(k) && PTR_DEV(k, 0) == PTR_CHECK_DEV) -		return true; +	if (newsize <= KEYLIST_INLINE || +	    roundup_pow_of_two(oldsize) == newsize) +		return 0; -	for (i = 0; i < KEY_PTRS(k); i++) -		if (ptr_available(b->c, k, i)) { -			g = PTR_BUCKET(b->c, k, i); -			stale = ptr_stale(b->c, k, i); +	new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO); + +	if (!new_keys) +		return -ENOMEM; -			btree_bug_on(stale > 96, b, -				     "key too stale: %i, need_gc %u", -				     stale, b->c->need_gc); +	if (!old_keys) +		memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize); -			btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k), -				     b, "stale dirty pointer"); +	l->keys_p = new_keys; +	l->top_p = new_keys + oldsize; -			if (stale) -				return true; +	return 0; +} -#ifdef CONFIG_BCACHE_EDEBUG -			if (!mutex_trylock(&b->c->bucket_lock)) -				continue; +struct bkey *bch_keylist_pop(struct keylist *l) +{ +	struct bkey *k = l->keys; -			if (b->level) { -				if (KEY_DIRTY(k) || -				    g->prio != BTREE_PRIO || -				    (b->c->gc_mark_valid && -				     GC_MARK(g) != GC_MARK_METADATA)) -					goto bug; - -			} else { -				if (g->prio == BTREE_PRIO) -					goto bug; - -				if (KEY_DIRTY(k) && -				    b->c->gc_mark_valid && -				    GC_MARK(g) != GC_MARK_DIRTY) -					goto bug; -			} -			mutex_unlock(&b->c->bucket_lock); -#endif -		} +	if (k == l->top) +		return NULL; -	return false; -#ifdef CONFIG_BCACHE_EDEBUG -bug: -	mutex_unlock(&b->c->bucket_lock); +	while (bkey_next(k) != l->top) +		k = bkey_next(k); -	{ -		char buf[80]; +	return l->top = k; +} -		bch_bkey_to_text(buf, sizeof(buf), k); -		btree_bug(b, -"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i", -			  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), -			  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen); -	} -	return true; -#endif +void bch_keylist_pop_front(struct keylist *l) +{ +	l->top_p -= bkey_u64s(l->keys); + +	memmove(l->keys, +		bkey_next(l->keys), +		bch_keylist_bytes(l));  }  /* Key/pointer manipulation */ @@ -232,56 +229,138 @@ bool __bch_cut_back(const struct bkey *where, struct bkey *k)  	return true;  } -static uint64_t merge_chksums(struct bkey *l, struct bkey *r) +/* Auxiliary search trees */ + +/* 32 bits total: */ +#define BKEY_MID_BITS		3 +#define BKEY_EXPONENT_BITS	7 +#define BKEY_MANTISSA_BITS	(32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS) +#define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1) + +struct bkey_float { +	unsigned	exponent:BKEY_EXPONENT_BITS; +	unsigned	m:BKEY_MID_BITS; +	unsigned	mantissa:BKEY_MANTISSA_BITS; +} __packed; + +/* + * BSET_CACHELINE was originally intended to match the hardware cacheline size - + * it used to be 64, but I realized the lookup code would touch slightly less + * memory if it was 128. + * + * It definites the number of bytes (in struct bset) per struct bkey_float in + * the auxiliar search tree - when we're done searching the bset_float tree we + * have this many bytes left that we do a linear search over. + * + * Since (after level 5) every level of the bset_tree is on a new cacheline, + * we're touching one fewer cacheline in the bset tree in exchange for one more + * cacheline in the linear search - but the linear search might stop before it + * gets to the second cacheline. 
+ */ + +#define BSET_CACHELINE		128 + +/* Space required for the btree node keys */ +static inline size_t btree_keys_bytes(struct btree_keys *b)  { -	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) & -		~((uint64_t)1 << 63); +	return PAGE_SIZE << b->page_order;  } -/* Tries to merge l and r: l should be lower than r - * Returns true if we were able to merge. If we did merge, l will be the merged - * key, r will be untouched. - */ -bool bch_bkey_try_merge(struct btree *b, struct bkey *l, struct bkey *r) +static inline size_t btree_keys_cachelines(struct btree_keys *b)  { -	unsigned i; +	return btree_keys_bytes(b) / BSET_CACHELINE; +} -	if (key_merging_disabled(b->c)) -		return false; +/* Space required for the auxiliary search trees */ +static inline size_t bset_tree_bytes(struct btree_keys *b) +{ +	return btree_keys_cachelines(b) * sizeof(struct bkey_float); +} -	if (KEY_PTRS(l) != KEY_PTRS(r) || -	    KEY_DIRTY(l) != KEY_DIRTY(r) || -	    bkey_cmp(l, &START_KEY(r))) -		return false; +/* Space required for the prev pointers */ +static inline size_t bset_prev_bytes(struct btree_keys *b) +{ +	return btree_keys_cachelines(b) * sizeof(uint8_t); +} -	for (i = 0; i < KEY_PTRS(l); i++) -		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] || -		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i)) -			return false; +/* Memory allocation */ -	/* Keys with no pointers aren't restricted to one bucket and could -	 * overflow KEY_SIZE -	 */ -	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) { -		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l)); -		SET_KEY_SIZE(l, USHRT_MAX); +void bch_btree_keys_free(struct btree_keys *b) +{ +	struct bset_tree *t = b->set; -		bch_cut_front(l, r); -		return false; -	} +	if (bset_prev_bytes(b) < PAGE_SIZE) +		kfree(t->prev); +	else +		free_pages((unsigned long) t->prev, +			   get_order(bset_prev_bytes(b))); -	if (KEY_CSUM(l)) { -		if (KEY_CSUM(r)) -			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r); -		else -			SET_KEY_CSUM(l, 0); -	} +	if (bset_tree_bytes(b) < PAGE_SIZE) +		kfree(t->tree); +	else +		free_pages((unsigned long) t->tree, +			   get_order(bset_tree_bytes(b))); -	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r)); -	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r)); +	free_pages((unsigned long) t->data, b->page_order); -	return true; +	t->prev = NULL; +	t->tree = NULL; +	t->data = NULL; +} +EXPORT_SYMBOL(bch_btree_keys_free); + +int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp) +{ +	struct bset_tree *t = b->set; + +	BUG_ON(t->data); + +	b->page_order = page_order; + +	t->data = (void *) __get_free_pages(gfp, b->page_order); +	if (!t->data) +		goto err; + +	t->tree = bset_tree_bytes(b) < PAGE_SIZE +		? kmalloc(bset_tree_bytes(b), gfp) +		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b))); +	if (!t->tree) +		goto err; + +	t->prev = bset_prev_bytes(b) < PAGE_SIZE +		? 
kmalloc(bset_prev_bytes(b), gfp) +		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b))); +	if (!t->prev) +		goto err; + +	return 0; +err: +	bch_btree_keys_free(b); +	return -ENOMEM;  } +EXPORT_SYMBOL(bch_btree_keys_alloc); + +void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops, +			 bool *expensive_debug_checks) +{ +	unsigned i; + +	b->ops = ops; +	b->expensive_debug_checks = expensive_debug_checks; +	b->nsets = 0; +	b->last_set_unwritten = 0; + +	/* XXX: shouldn't be needed */ +	for (i = 0; i < MAX_BSETS; i++) +		b->set[i].size = 0; +	/* +	 * Second loop starts at 1 because b->keys[0]->data is the memory we +	 * allocated +	 */ +	for (i = 1; i < MAX_BSETS; i++) +		b->set[i].data = NULL; +} +EXPORT_SYMBOL(bch_btree_keys_init);  /* Binary tree stuff for auxiliary search trees */ @@ -432,9 +511,11 @@ static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)  	return ((void *) k - (void *) t->data) / BSET_CACHELINE;  } -static unsigned bkey_to_cacheline_offset(struct bkey *k) +static unsigned bkey_to_cacheline_offset(struct bset_tree *t, +					 unsigned cacheline, +					 struct bkey *k)  { -	return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t); +	return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);  }  static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j) @@ -458,16 +539,8 @@ static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)  static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)  { -#ifdef CONFIG_X86_64 -	asm("shrd %[shift],%[high],%[low]" -	    : [low] "+Rm" (low) -	    : [high] "R" (high), -	    [shift] "ci" (shift) -	    : "cc"); -#else  	low >>= shift;  	low  |= (high << 1) << (63U - shift); -#endif  	return low;  } @@ -489,7 +562,7 @@ static void make_bfloat(struct bset_tree *t, unsigned j)  		: tree_to_prev_bkey(t, j >> ffs(j));  	struct bkey *r = is_power_of_2(j + 1) -		? node(t->data, t->data->keys - bkey_u64s(&t->end)) +		? 
bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))  		: tree_to_bkey(t, j >> (ffz(j) + 1));  	BUG_ON(m < l || m > r); @@ -513,9 +586,9 @@ static void make_bfloat(struct bset_tree *t, unsigned j)  		f->exponent = 127;  } -static void bset_alloc_tree(struct btree *b, struct bset_tree *t) +static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)  { -	if (t != b->sets) { +	if (t != b->set) {  		unsigned j = roundup(t[-1].size,  				     64 / sizeof(struct bkey_float)); @@ -523,33 +596,54 @@ static void bset_alloc_tree(struct btree *b, struct bset_tree *t)  		t->prev = t[-1].prev + j;  	} -	while (t < b->sets + MAX_BSETS) +	while (t < b->set + MAX_BSETS)  		t++->size = 0;  } -static void bset_build_unwritten_tree(struct btree *b) +static void bch_bset_build_unwritten_tree(struct btree_keys *b)  { -	struct bset_tree *t = b->sets + b->nsets; +	struct bset_tree *t = bset_tree_last(b); + +	BUG_ON(b->last_set_unwritten); +	b->last_set_unwritten = 1;  	bset_alloc_tree(b, t); -	if (t->tree != b->sets->tree + bset_tree_space(b)) { -		t->prev[0] = bkey_to_cacheline_offset(t->data->start); +	if (t->tree != b->set->tree + btree_keys_cachelines(b)) { +		t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);  		t->size = 1;  	}  } -static void bset_build_written_tree(struct btree *b) +void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)  { -	struct bset_tree *t = b->sets + b->nsets; -	struct bkey *k = t->data->start; +	if (i != b->set->data) { +		b->set[++b->nsets].data = i; +		i->seq = b->set->data->seq; +	} else +		get_random_bytes(&i->seq, sizeof(uint64_t)); + +	i->magic	= magic; +	i->version	= 0; +	i->keys		= 0; + +	bch_bset_build_unwritten_tree(b); +} +EXPORT_SYMBOL(bch_bset_init_next); + +void bch_bset_build_written_tree(struct btree_keys *b) +{ +	struct bset_tree *t = bset_tree_last(b); +	struct bkey *prev = NULL, *k = t->data->start;  	unsigned j, cacheline = 1; +	b->last_set_unwritten = 0; +  	bset_alloc_tree(b, t);  	t->size = min_t(unsigned, -			bkey_to_cacheline(t, end(t->data)), -			b->sets->tree + bset_tree_space(b) - t->tree); +			bkey_to_cacheline(t, bset_bkey_last(t->data)), +			b->set->tree + btree_keys_cachelines(b) - t->tree);  	if (t->size < 2) {  		t->size = 0; @@ -562,16 +656,14 @@ static void bset_build_written_tree(struct btree *b)  	for (j = inorder_next(0, t->size);  	     j;  	     j = inorder_next(j, t->size)) { -		while (bkey_to_cacheline(t, k) != cacheline) -			k = bkey_next(k); +		while (bkey_to_cacheline(t, k) < cacheline) +			prev = k, k = bkey_next(k); -		t->prev[j] = bkey_u64s(k); -		k = bkey_next(k); -		cacheline++; -		t->tree[j].m = bkey_to_cacheline_offset(k); +		t->prev[j] = bkey_u64s(prev); +		t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);  	} -	while (bkey_next(k) != end(t->data)) +	while (bkey_next(k) != bset_bkey_last(t->data))  		k = bkey_next(k);  	t->end = *k; @@ -582,14 +674,17 @@ static void bset_build_written_tree(struct btree *b)  	     j = inorder_next(j, t->size))  		make_bfloat(t, j);  } +EXPORT_SYMBOL(bch_bset_build_written_tree); -void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k) +/* Insert */ + +void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)  {  	struct bset_tree *t;  	unsigned inorder, j = 1; -	for (t = b->sets; t <= &b->sets[b->nsets]; t++) -		if (k < end(t->data)) +	for (t = b->set; t <= bset_tree_last(b); t++) +		if (k < bset_bkey_last(t->data))  			goto found_set;  	BUG(); @@ -602,7 +697,7 @@ found_set:  	if (k == t->data->start)  		goto 
fix_left; -	if (bkey_next(k) == end(t->data)) { +	if (bkey_next(k) == bset_bkey_last(t->data)) {  		t->end = *k;  		goto fix_right;  	} @@ -627,10 +722,12 @@ fix_right:	do {  			j = j * 2 + 1;  		} while (j < t->size);  } +EXPORT_SYMBOL(bch_bset_fix_invalidated_key); -void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k) +static void bch_bset_fix_lookup_table(struct btree_keys *b, +				      struct bset_tree *t, +				      struct bkey *k)  { -	struct bset_tree *t = &b->sets[b->nsets];  	unsigned shift = bkey_u64s(k);  	unsigned j = bkey_to_cacheline(t, k); @@ -642,8 +739,8 @@ void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)  	 * lookup table for the first key that is strictly greater than k:  	 * it's either k's cacheline or the next one  	 */ -	if (j < t->size && -	    table_to_bkey(t, j) <= k) +	while (j < t->size && +	       table_to_bkey(t, j) <= k)  		j++;  	/* Adjust all the lookup table entries, and find a new key for any that @@ -658,54 +755,124 @@ void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)  			while (k < cacheline_to_bkey(t, j, 0))  				k = bkey_next(k); -			t->prev[j] = bkey_to_cacheline_offset(k); +			t->prev[j] = bkey_to_cacheline_offset(t, j, k);  		}  	} -	if (t->size == b->sets->tree + bset_tree_space(b) - t->tree) +	if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)  		return;  	/* Possibly add a new entry to the end of the lookup table */  	for (k = table_to_bkey(t, t->size - 1); -	     k != end(t->data); +	     k != bset_bkey_last(t->data);  	     k = bkey_next(k))  		if (t->size == bkey_to_cacheline(t, k)) { -			t->prev[t->size] = bkey_to_cacheline_offset(k); +			t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k);  			t->size++;  		}  } -void bch_bset_init_next(struct btree *b) +/* + * Tries to merge l and r: l should be lower than r + * Returns true if we were able to merge. If we did merge, l will be the merged + * key, r will be untouched. 
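For intuition, a minimal userspace model of the successful-merge arithmetic that the old inline bch_bkey_try_merge() performed and that now sits behind ops->key_merge (a sketch only: it models bcache's convention that a key stores the end offset of an extent plus its length, and ignores pointers, checksums, dirty bits and the USHRT_MAX overflow case handled by the real code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* KEY_OFFSET() is the end of the extent, KEY_SIZE() its length in sectors */
struct ext { uint64_t offset, size; };

/* back merge: only succeeds when l ends exactly where r starts */
static int try_merge(struct ext *l, const struct ext *r)
{
	if (l->offset != r->offset - r->size)	/* the bkey_cmp(l, &START_KEY(r)) check */
		return 0;
	l->offset += r->size;
	l->size += r->size;
	return 1;
}

int main(void)
{
	struct ext l = { .offset = 8, .size = 8 };	/* sectors 0-7  */
	struct ext r = { .offset = 16, .size = 8 };	/* sectors 8-15 */

	assert(try_merge(&l, &r));
	printf("merged extent: end %llu, size %llu\n",	/* 16, 16 */
	       (unsigned long long)l.offset, (unsigned long long)l.size);
	return 0;
}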
+ */ +bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)  { -	struct bset *i = write_block(b); +	if (!b->ops->key_merge) +		return false; -	if (i != b->sets[0].data) { -		b->sets[++b->nsets].data = i; -		i->seq = b->sets[0].data->seq; -	} else -		get_random_bytes(&i->seq, sizeof(uint64_t)); +	/* +	 * Generic header checks +	 * Assumes left and right are in order +	 * Left and right must be exactly aligned +	 */ +	if (!bch_bkey_equal_header(l, r) || +	     bkey_cmp(l, &START_KEY(r))) +		return false; -	i->magic	= bset_magic(b->c); -	i->version	= 0; -	i->keys		= 0; +	return b->ops->key_merge(b, l, r); +} +EXPORT_SYMBOL(bch_bkey_try_merge); + +void bch_bset_insert(struct btree_keys *b, struct bkey *where, +		     struct bkey *insert) +{ +	struct bset_tree *t = bset_tree_last(b); + +	BUG_ON(!b->last_set_unwritten); +	BUG_ON(bset_byte_offset(b, t->data) + +	       __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) > +	       PAGE_SIZE << b->page_order); + +	memmove((uint64_t *) where + bkey_u64s(insert), +		where, +		(void *) bset_bkey_last(t->data) - (void *) where); + +	t->data->keys += bkey_u64s(insert); +	bkey_copy(where, insert); +	bch_bset_fix_lookup_table(b, t, where); +} +EXPORT_SYMBOL(bch_bset_insert); + +unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k, +			      struct bkey *replace_key) +{ +	unsigned status = BTREE_INSERT_STATUS_NO_INSERT; +	struct bset *i = bset_tree_last(b)->data; +	struct bkey *m, *prev = NULL; +	struct btree_iter iter; + +	BUG_ON(b->ops->is_extents && !KEY_SIZE(k)); + +	m = bch_btree_iter_init(b, &iter, b->ops->is_extents +				? PRECEDING_KEY(&START_KEY(k)) +				: PRECEDING_KEY(k)); + +	if (b->ops->insert_fixup(b, k, &iter, replace_key)) +		return status; -	bset_build_unwritten_tree(b); +	status = BTREE_INSERT_STATUS_INSERT; + +	while (m != bset_bkey_last(i) && +	       bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) +		prev = m, m = bkey_next(m); + +	/* prev is in the tree, if we merge we're done */ +	status = BTREE_INSERT_STATUS_BACK_MERGE; +	if (prev && +	    bch_bkey_try_merge(b, prev, k)) +		goto merged; +#if 0 +	status = BTREE_INSERT_STATUS_OVERWROTE; +	if (m != bset_bkey_last(i) && +	    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m)) +		goto copy; +#endif +	status = BTREE_INSERT_STATUS_FRONT_MERGE; +	if (m != bset_bkey_last(i) && +	    bch_bkey_try_merge(b, k, m)) +		goto copy; + +	bch_bset_insert(b, m, k); +copy:	bkey_copy(m, k); +merged: +	return status;  } +EXPORT_SYMBOL(bch_btree_insert_key); + +/* Lookup */  struct bset_search_iter {  	struct bkey *l, *r;  }; -static struct bset_search_iter bset_search_write_set(struct btree *b, -						     struct bset_tree *t, +static struct bset_search_iter bset_search_write_set(struct bset_tree *t,  						     const struct bkey *search)  {  	unsigned li = 0, ri = t->size; -	BUG_ON(!b->nsets && -	       t->size < bkey_to_cacheline(t, end(t->data))); -  	while (li + 1 != ri) {  		unsigned m = (li + ri) >> 1; @@ -717,12 +884,11 @@ static struct bset_search_iter bset_search_write_set(struct btree *b,  	return (struct bset_search_iter) {  		table_to_bkey(t, li), -		ri < t->size ? table_to_bkey(t, ri) : end(t->data) +		ri < t->size ? 
table_to_bkey(t, ri) : bset_bkey_last(t->data)  	};  } -static struct bset_search_iter bset_search_tree(struct btree *b, -						struct bset_tree *t, +static struct bset_search_iter bset_search_tree(struct bset_tree *t,  						const struct bkey *search)  {  	struct bkey *l, *r; @@ -769,7 +935,7 @@ static struct bset_search_iter bset_search_tree(struct btree *b,  			f = &t->tree[inorder_next(j, t->size)];  			r = cacheline_to_bkey(t, inorder, f->m);  		} else -			r = end(t->data); +			r = bset_bkey_last(t->data);  	} else {  		r = cacheline_to_bkey(t, inorder, f->m); @@ -783,7 +949,7 @@ static struct bset_search_iter bset_search_tree(struct btree *b,  	return (struct bset_search_iter) {l, r};  } -struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t, +struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,  			       const struct bkey *search)  {  	struct bset_search_iter i; @@ -805,7 +971,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,  	if (unlikely(!t->size)) {  		i.l = t->data->start; -		i.r = end(t->data); +		i.r = bset_bkey_last(t->data);  	} else if (bset_written(b, t)) {  		/*  		 * Each node in the auxiliary search tree covers a certain range @@ -815,25 +981,29 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,  		 */  		if (unlikely(bkey_cmp(search, &t->end) >= 0)) -			return end(t->data); +			return bset_bkey_last(t->data);  		if (unlikely(bkey_cmp(search, t->data->start) < 0))  			return t->data->start; -		i = bset_search_tree(b, t, search); -	} else -		i = bset_search_write_set(b, t, search); +		i = bset_search_tree(t, search); +	} else { +		BUG_ON(!b->nsets && +		       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data))); + +		i = bset_search_write_set(t, search); +	} -#ifdef CONFIG_BCACHE_EDEBUG -	BUG_ON(bset_written(b, t) && -	       i.l != t->data->start && -	       bkey_cmp(tree_to_prev_bkey(t, -		  inorder_to_tree(bkey_to_cacheline(t, i.l), t)), -			search) > 0); +	if (btree_keys_expensive_checks(b)) { +		BUG_ON(bset_written(b, t) && +		       i.l != t->data->start && +		       bkey_cmp(tree_to_prev_bkey(t, +			  inorder_to_tree(bkey_to_cacheline(t, i.l), t)), +				search) > 0); -	BUG_ON(i.r != end(t->data) && -	       bkey_cmp(i.r, search) <= 0); -#endif +		BUG_ON(i.r != bset_bkey_last(t->data) && +		       bkey_cmp(i.r, search) <= 0); +	}  	while (likely(i.l != i.r) &&  	       bkey_cmp(i.l, search) <= 0) @@ -841,15 +1011,17 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,  	return i.l;  } +EXPORT_SYMBOL(__bch_bset_search);  /* Btree iterator */ +typedef bool (btree_iter_cmp_fn)(struct btree_iter_set, +				 struct btree_iter_set); +  static inline bool btree_iter_cmp(struct btree_iter_set l,  				  struct btree_iter_set r)  { -	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k)); - -	return c ? 
c > 0 : l.k < r.k; +	return bkey_cmp(l.k, r.k) > 0;  }  static inline bool btree_iter_end(struct btree_iter *iter) @@ -866,27 +1038,44 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,  				 btree_iter_cmp));  } -struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter, -			       struct bkey *search, struct bset_tree *start) +static struct bkey *__bch_btree_iter_init(struct btree_keys *b, +					  struct btree_iter *iter, +					  struct bkey *search, +					  struct bset_tree *start)  {  	struct bkey *ret = NULL;  	iter->size = ARRAY_SIZE(iter->data);  	iter->used = 0; -	for (; start <= &b->sets[b->nsets]; start++) { +#ifdef CONFIG_BCACHE_DEBUG +	iter->b = b; +#endif + +	for (; start <= bset_tree_last(b); start++) {  		ret = bch_bset_search(b, start, search); -		bch_btree_iter_push(iter, ret, end(start->data)); +		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));  	}  	return ret;  } -struct bkey *bch_btree_iter_next(struct btree_iter *iter) +struct bkey *bch_btree_iter_init(struct btree_keys *b, +				 struct btree_iter *iter, +				 struct bkey *search) +{ +	return __bch_btree_iter_init(b, iter, search, b->set); +} +EXPORT_SYMBOL(bch_btree_iter_init); + +static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter, +						 btree_iter_cmp_fn *cmp)  {  	struct btree_iter_set unused;  	struct bkey *ret = NULL;  	if (!btree_iter_end(iter)) { +		bch_btree_iter_next_check(iter); +  		ret = iter->data->k;  		iter->data->k = bkey_next(iter->data->k); @@ -896,16 +1085,23 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)  		}  		if (iter->data->k == iter->data->end) -			heap_pop(iter, unused, btree_iter_cmp); +			heap_pop(iter, unused, cmp);  		else -			heap_sift(iter, 0, btree_iter_cmp); +			heap_sift(iter, 0, cmp);  	}  	return ret;  } +struct bkey *bch_btree_iter_next(struct btree_iter *iter) +{ +	return __bch_btree_iter_next(iter, btree_iter_cmp); + +} +EXPORT_SYMBOL(bch_btree_iter_next); +  struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter, -					struct btree *b, ptr_filter_fn fn) +					struct btree_keys *b, ptr_filter_fn fn)  {  	struct bkey *ret; @@ -916,63 +1112,60 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,  	return ret;  } -struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search) -{ -	struct btree_iter iter; - -	bch_btree_iter_init(b, &iter, search); -	return bch_btree_iter_next_filter(&iter, b, bch_ptr_bad); -} -  /* Mergesort */ -static void btree_sort_fixup(struct btree_iter *iter) +void bch_bset_sort_state_free(struct bset_sort_state *state)  { -	while (iter->used > 1) { -		struct btree_iter_set *top = iter->data, *i = top + 1; -		struct bkey *k; +	if (state->pool) +		mempool_destroy(state->pool); +} -		if (iter->used > 2 && -		    btree_iter_cmp(i[0], i[1])) -			i++; +int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order) +{ +	spin_lock_init(&state->time.lock); -		for (k = i->k; -		     k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0; -		     k = bkey_next(k)) -			if (top->k > i->k) -				__bch_cut_front(top->k, k); -			else if (KEY_SIZE(k)) -				bch_cut_back(&START_KEY(k), top->k); +	state->page_order = page_order; +	state->crit_factor = int_sqrt(1 << page_order); -		if (top->k < i->k || k == i->k) -			break; +	state->pool = mempool_create_page_pool(1, page_order); +	if (!state->pool) +		return -ENOMEM; -		heap_sift(iter, i - top, btree_iter_cmp); -	} +	return 0;  } +EXPORT_SYMBOL(bch_bset_sort_state_init); -static void 
btree_mergesort(struct btree *b, struct bset *out, +static void btree_mergesort(struct btree_keys *b, struct bset *out,  			    struct btree_iter *iter,  			    bool fixup, bool remove_stale)  { +	int i;  	struct bkey *k, *last = NULL; -	bool (*bad)(struct btree *, const struct bkey *) = remove_stale +	BKEY_PADDED(k) tmp; +	bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale  		? bch_ptr_bad  		: bch_ptr_invalid; +	/* Heapify the iterator, using our comparison function */ +	for (i = iter->used / 2 - 1; i >= 0; --i) +		heap_sift(iter, i, b->ops->sort_cmp); +  	while (!btree_iter_end(iter)) { -		if (fixup && !b->level) -			btree_sort_fixup(iter); +		if (b->ops->sort_fixup && fixup) +			k = b->ops->sort_fixup(iter, &tmp.k); +		else +			k = NULL; + +		if (!k) +			k = __bch_btree_iter_next(iter, b->ops->sort_cmp); -		k = bch_btree_iter_next(iter);  		if (bad(b, k))  			continue;  		if (!last) {  			last = out->start;  			bkey_copy(last, k); -		} else if (b->level || -			   !bch_bkey_try_merge(b, last, k)) { +		} else if (!bch_bkey_try_merge(b, last, k)) {  			last = bkey_next(last);  			bkey_copy(last, k);  		} @@ -981,30 +1174,32 @@ static void btree_mergesort(struct btree *b, struct bset *out,  	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;  	pr_debug("sorted %i keys", out->keys); -	bch_check_key_order(b, out);  } -static void __btree_sort(struct btree *b, struct btree_iter *iter, -			 unsigned start, unsigned order, bool fixup) +static void __btree_sort(struct btree_keys *b, struct btree_iter *iter, +			 unsigned start, unsigned order, bool fixup, +			 struct bset_sort_state *state)  {  	uint64_t start_time; -	bool remove_stale = !b->written; +	bool used_mempool = false;  	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,  						     order);  	if (!out) { -		mutex_lock(&b->c->sort_lock); -		out = b->c->sort; -		order = ilog2(bucket_pages(b->c)); +		struct page *outp; + +		BUG_ON(order > state->page_order); + +		outp = mempool_alloc(state->pool, GFP_NOIO); +		out = page_address(outp); +		used_mempool = true; +		order = state->page_order;  	}  	start_time = local_clock(); -	btree_mergesort(b, out, iter, fixup, remove_stale); +	btree_mergesort(b, out, iter, fixup, false);  	b->nsets = start; -	if (!fixup && !start && b->written) -		bch_btree_verify(b, out); -  	if (!start && order == b->page_order) {  		/*  		 * Our temporary buffer is the same size as the btree node's @@ -1012,89 +1207,76 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,  		 * memcpy()  		 */ -		out->magic	= bset_magic(b->c); -		out->seq	= b->sets[0].data->seq; -		out->version	= b->sets[0].data->version; -		swap(out, b->sets[0].data); - -		if (b->c->sort == b->sets[0].data) -			b->c->sort = out; +		out->magic	= b->set->data->magic; +		out->seq	= b->set->data->seq; +		out->version	= b->set->data->version; +		swap(out, b->set->data);  	} else { -		b->sets[start].data->keys = out->keys; -		memcpy(b->sets[start].data->start, out->start, -		       (void *) end(out) - (void *) out->start); +		b->set[start].data->keys = out->keys; +		memcpy(b->set[start].data->start, out->start, +		       (void *) bset_bkey_last(out) - (void *) out->start);  	} -	if (out == b->c->sort) -		mutex_unlock(&b->c->sort_lock); +	if (used_mempool) +		mempool_free(virt_to_page(out), state->pool);  	else  		free_pages((unsigned long) out, order); -	if (b->written) -		bset_build_written_tree(b); +	bch_bset_build_written_tree(b); -	if (!start) { -		spin_lock(&b->c->sort_time_lock); -		
bch_time_stats_update(&b->c->sort_time, start_time); -		spin_unlock(&b->c->sort_time_lock); -	} +	if (!start) +		bch_time_stats_update(&state->time, start_time);  } -void bch_btree_sort_partial(struct btree *b, unsigned start) +void bch_btree_sort_partial(struct btree_keys *b, unsigned start, +			    struct bset_sort_state *state)  { -	size_t oldsize = 0, order = b->page_order, keys = 0; +	size_t order = b->page_order, keys = 0;  	struct btree_iter iter; -	__bch_btree_iter_init(b, &iter, NULL, &b->sets[start]); - -	BUG_ON(b->sets[b->nsets].data == write_block(b) && -	       (b->sets[b->nsets].size || b->nsets)); +	int oldsize = bch_count_data(b); -	if (b->written) -		oldsize = bch_count_data(b); +	__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);  	if (start) {  		unsigned i;  		for (i = start; i <= b->nsets; i++) -			keys += b->sets[i].data->keys; +			keys += b->set[i].data->keys; -		order = roundup_pow_of_two(__set_bytes(b->sets->data, -						       keys)) / PAGE_SIZE; -		if (order) -			order = ilog2(order); +		order = get_order(__set_bytes(b->set->data, keys));  	} -	__btree_sort(b, &iter, start, order, false); +	__btree_sort(b, &iter, start, order, false, state); -	EBUG_ON(b->written && bch_count_data(b) != oldsize); +	EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);  } +EXPORT_SYMBOL(bch_btree_sort_partial); -void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter) +void bch_btree_sort_and_fix_extents(struct btree_keys *b, +				    struct btree_iter *iter, +				    struct bset_sort_state *state)  { -	BUG_ON(!b->written); -	__btree_sort(b, iter, 0, b->page_order, true); +	__btree_sort(b, iter, 0, b->page_order, true, state);  } -void bch_btree_sort_into(struct btree *b, struct btree *new) +void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new, +			 struct bset_sort_state *state)  {  	uint64_t start_time = local_clock();  	struct btree_iter iter;  	bch_btree_iter_init(b, &iter, NULL); -	btree_mergesort(b, new->sets->data, &iter, false, true); +	btree_mergesort(b, new->set->data, &iter, false, true); -	spin_lock(&b->c->sort_time_lock); -	bch_time_stats_update(&b->c->sort_time, start_time); -	spin_unlock(&b->c->sort_time_lock); +	bch_time_stats_update(&state->time, start_time); -	bkey_copy_key(&new->key, &b->key); -	new->sets->size = 0; +	new->set->size = 0; // XXX: why?  
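All the sort entry points above now thread a struct bset_sort_state through instead of touching cache_set fields directly. A rough usage sketch against the signatures added in this patch (a fragment, not compilable on its own; the ilog2(c->btree_pages) sizing is an assumption about how the owning cache set would initialize it, and error handling is trimmed):

	struct bset_sort_state *state = &c->sort;

	/* once, at cache set init: sizes the fallback page mempool */
	if (bch_bset_sort_state_init(state, ilog2(c->btree_pages)))
		goto err;

	/* per btree node, with the node locked */
	bch_btree_sort_lazy(&b->keys, state);	/* resort only the cheap tail sets */
	bch_btree_sort(&b->keys, state);	/* full resort down to a single bset */

	/* at cache set teardown */
	bch_bset_sort_state_free(state);

The crit_factor = int_sqrt(1 << page_order) computed at init is what bch_btree_sort_lazy() multiplies into SORT_CRIT below to decide how deep a partial resort is worth doing.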
}  #define SORT_CRIT	(4096 / sizeof(uint64_t)) -void bch_btree_sort_lazy(struct btree *b) +void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)  {  	unsigned crit = SORT_CRIT;  	int i; @@ -1103,50 +1285,32 @@ void bch_btree_sort_lazy(struct btree *b)  	if (!b->nsets)  		goto out; -	/* If not a leaf node, always sort */ -	if (b->level) { -		bch_btree_sort(b); -		return; -	} -  	for (i = b->nsets - 1; i >= 0; --i) { -		crit *= b->c->sort_crit_factor; +		crit *= state->crit_factor; -		if (b->sets[i].data->keys < crit) { -			bch_btree_sort_partial(b, i); +		if (b->set[i].data->keys < crit) { +			bch_btree_sort_partial(b, i, state);  			return;  		}  	}  	/* Sort if we'd overflow */  	if (b->nsets + 1 == MAX_BSETS) { -		bch_btree_sort(b); +		bch_btree_sort(b, state);  		return;  	}  out: -	bset_build_written_tree(b); +	bch_bset_build_written_tree(b);  } +EXPORT_SYMBOL(bch_btree_sort_lazy); -/* Sysfs stuff */ - -struct bset_stats { -	size_t nodes; -	size_t sets_written, sets_unwritten; -	size_t bytes_written, bytes_unwritten; -	size_t floats, failed; -}; - -static int bch_btree_bset_stats(struct btree *b, struct btree_op *op, -			    struct bset_stats *stats) +void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)  { -	struct bkey *k;  	unsigned i; -	stats->nodes++; -  	for (i = 0; i <= b->nsets; i++) { -		struct bset_tree *t = &b->sets[i]; +		struct bset_tree *t = &b->set[i];  		size_t bytes = t->data->keys * sizeof(uint64_t);  		size_t j; @@ -1164,43 +1328,4 @@ static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,  			stats->bytes_unwritten += bytes;  		}  	} - -	if (b->level) { -		struct btree_iter iter; - -		for_each_key_filter(b, k, &iter, bch_ptr_bad) { -			int ret = btree(bset_stats, k, b, op, stats); -			if (ret) -				return ret; -		} -	} - -	return 0; -} - -int bch_bset_print_stats(struct cache_set *c, char *buf) -{ -	struct btree_op op; -	struct bset_stats t; -	int ret; - -	bch_btree_op_init_stack(&op); -	memset(&t, 0, sizeof(struct bset_stats)); - -	ret = btree_root(bset_stats, c, &op, &t); -	if (ret) -		return ret; - -	return snprintf(buf, PAGE_SIZE, -			"btree nodes:		%zu\n" -			"written sets:		%zu\n" -			"unwritten sets:		%zu\n" -			"written key bytes:	%zu\n" -			"unwritten key bytes:	%zu\n" -			"floats:			%zu\n" -			"failed:			%zu\n", -			t.nodes, -			t.sets_written, t.sets_unwritten, -			t.bytes_written, t.bytes_unwritten, -			t.floats, t.failed);  } diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index ae115a253d7..5f6728d5d4d 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -1,7 +1,11 @@  #ifndef _BCACHE_BSET_H  #define _BCACHE_BSET_H -#include <linux/slab.h> +#include <linux/bcache.h> +#include <linux/kernel.h> +#include <linux/types.h> + +#include "util.h" /* for time_stats */  /*   * BKEYS: @@ -142,17 +146,13 @@   * first key in that range of bytes again.   
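The lookup described in this comment (and by BSET_CACHELINE over in bset.c) is a two-level search: binary-search a small per-cacheline table to land close to the target, then linearly scan roughly one cacheline of keys. Below is a simplified, self-contained model using fixed-size keys instead of variable-size bkeys (an illustration of the idea only, not the bkey_float encoding); like bch_bset_search() further down, it returns the first key strictly greater than the search key:

#include <stdint.h>
#include <stdio.h>

#define KEYS_PER_LINE	4	/* stand-in for BSET_CACHELINE / key size */

static size_t search(const uint64_t *keys, size_t n, uint64_t want)
{
	size_t lines = (n + KEYS_PER_LINE - 1) / KEYS_PER_LINE;
	size_t lo = 0, hi = lines, i;

	while (lo + 1 < hi) {		/* binary search the coarse table */
		size_t mid = (lo + hi) / 2;
		if (keys[mid * KEYS_PER_LINE] <= want)
			lo = mid;
		else
			hi = mid;
	}

	i = lo * KEYS_PER_LINE;		/* short linear scan from that line */
	while (i < n && keys[i] <= want)
		i++;
	return i;			/* n means "off the end" */
}

int main(void)
{
	uint64_t keys[] = { 2, 3, 5, 7, 11, 13, 17, 19, 23, 29 };
	size_t idx = search(keys, 10, 12);

	printf("first key > 12 is keys[%zu] = %llu\n",
	       idx, (unsigned long long)keys[idx]);
	return 0;
}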
*/ -/* Btree key comparison/iteration */ +struct btree_keys; +struct btree_iter; +struct btree_iter_set; +struct bkey_float;  #define MAX_BSETS		4U -struct btree_iter { -	size_t size, used; -	struct btree_iter_set { -		struct bkey *k, *end; -	} data[MAX_BSETS]; -}; -  struct bset_tree {  	/*  	 * We construct a binary tree in an array as if the array @@ -162,14 +162,14 @@ struct bset_tree {  	 */  	/* size of the binary tree and prev array */ -	unsigned	size; +	unsigned		size;  	/* function of size - precalculated for to_inorder() */ -	unsigned	extra; +	unsigned		extra;  	/* copy of the last key in the set */ -	struct bkey	end; -	struct bkey_float *tree; +	struct bkey		end; +	struct bkey_float	*tree;  	/*  	 * The nodes in the bset tree point to specific keys - this @@ -179,96 +179,227 @@ struct bset_tree {  	 * to keep bkey_float to 4 bytes and prev isn't used in the fast  	 * path.  	 */ -	uint8_t		*prev; +	uint8_t			*prev;  	/* The actual btree node, with pointers to each sorted set */ -	struct bset	*data; +	struct bset		*data;  }; -static __always_inline int64_t bkey_cmp(const struct bkey *l, -					const struct bkey *r) +struct btree_keys_ops { +	bool		(*sort_cmp)(struct btree_iter_set, +				    struct btree_iter_set); +	struct bkey	*(*sort_fixup)(struct btree_iter *, struct bkey *); +	bool		(*insert_fixup)(struct btree_keys *, struct bkey *, +					struct btree_iter *, struct bkey *); +	bool		(*key_invalid)(struct btree_keys *, +				       const struct bkey *); +	bool		(*key_bad)(struct btree_keys *, const struct bkey *); +	bool		(*key_merge)(struct btree_keys *, +				     struct bkey *, struct bkey *); +	void		(*key_to_text)(char *, size_t, const struct bkey *); +	void		(*key_dump)(struct btree_keys *, const struct bkey *); + +	/* +	 * Only used for deciding whether to use START_KEY(k) or just the key +	 * itself in a couple places +	 */ +	bool		is_extents; +}; + +struct btree_keys { +	const struct btree_keys_ops	*ops; +	uint8_t			page_order; +	uint8_t			nsets; +	unsigned		last_set_unwritten:1; +	bool			*expensive_debug_checks; + +	/* +	 * Sets of sorted keys - the real btree node - plus a binary search tree +	 * +	 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point +	 * to the memory we have allocated for this btree node. Additionally, +	 * set[0]->data points to the entire btree node as it exists on disk. +	 */ +	struct bset_tree	set[MAX_BSETS]; +}; + +static inline struct bset_tree *bset_tree_last(struct btree_keys *b)  { -	return unlikely(KEY_INODE(l) != KEY_INODE(r)) -		? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r) -		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r); +	return b->set + b->nsets;  } -static inline size_t bkey_u64s(const struct bkey *k) +static inline bool bset_written(struct btree_keys *b, struct bset_tree *t)  { -	BUG_ON(KEY_CSUM(k) > 1); -	return 2 + KEY_PTRS(k) + (KEY_CSUM(k) ? 
1 : 0); +	return t <= b->set + b->nsets - b->last_set_unwritten;  } -static inline size_t bkey_bytes(const struct bkey *k) +static inline bool bkey_written(struct btree_keys *b, struct bkey *k)  { -	return bkey_u64s(k) * sizeof(uint64_t); +	return !b->last_set_unwritten || k < b->set[b->nsets].data->start;  } -static inline void bkey_copy(struct bkey *dest, const struct bkey *src) +static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i)  { -	memcpy(dest, src, bkey_bytes(src)); +	return ((size_t) i) - ((size_t) b->set->data);  } -static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src) +static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i)  { -	if (!src) -		src = &KEY(0, 0, 0); +	return bset_byte_offset(b, i) >> 9; +} + +#define __set_bytes(i, k)	(sizeof(*(i)) + (k) * sizeof(uint64_t)) +#define set_bytes(i)		__set_bytes(i, i->keys) + +#define __set_blocks(i, k, block_bytes)				\ +	DIV_ROUND_UP(__set_bytes(i, k), block_bytes) +#define set_blocks(i, block_bytes)				\ +	__set_blocks(i, (i)->keys, block_bytes) -	SET_KEY_INODE(dest, KEY_INODE(src)); -	SET_KEY_OFFSET(dest, KEY_OFFSET(src)); +static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b) +{ +	struct bset_tree *t = bset_tree_last(b); + +	BUG_ON((PAGE_SIZE << b->page_order) < +	       (bset_byte_offset(b, t->data) + set_bytes(t->data))); + +	if (!b->last_set_unwritten) +		return 0; + +	return ((PAGE_SIZE << b->page_order) - +		(bset_byte_offset(b, t->data) + set_bytes(t->data))) / +		sizeof(u64);  } -static inline struct bkey *bkey_next(const struct bkey *k) +static inline struct bset *bset_next_set(struct btree_keys *b, +					 unsigned block_bytes)  { -	uint64_t *d = (void *) k; -	return (struct bkey *) (d + bkey_u64s(k)); +	struct bset *i = bset_tree_last(b)->data; + +	return ((void *) i) + roundup(set_bytes(i), block_bytes);  } -/* Keylists */ +void bch_btree_keys_free(struct btree_keys *); +int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t); +void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *, +			 bool *); + +void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t); +void bch_bset_build_written_tree(struct btree_keys *); +void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *); +bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *); +void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *); +unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *, +			      struct bkey *); + +enum { +	BTREE_INSERT_STATUS_NO_INSERT = 0, +	BTREE_INSERT_STATUS_INSERT, +	BTREE_INSERT_STATUS_BACK_MERGE, +	BTREE_INSERT_STATUS_OVERWROTE, +	BTREE_INSERT_STATUS_FRONT_MERGE, +}; -struct keylist { -	struct bkey		*top; -	union { -		uint64_t		*list; -		struct bkey		*bottom; -	}; +/* Btree key iteration */ -	/* Enough room for btree_split's keys without realloc */ -#define KEYLIST_INLINE		16 -	uint64_t		d[KEYLIST_INLINE]; +struct btree_iter { +	size_t size, used; +#ifdef CONFIG_BCACHE_DEBUG +	struct btree_keys *b; +#endif +	struct btree_iter_set { +		struct bkey *k, *end; +	} data[MAX_BSETS];  }; -static inline void bch_keylist_init(struct keylist *l) +typedef bool (*ptr_filter_fn)(struct btree_keys *, const struct bkey *); + +struct bkey *bch_btree_iter_next(struct btree_iter *); +struct bkey *bch_btree_iter_next_filter(struct btree_iter *, +					struct btree_keys *, ptr_filter_fn); + +void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *); +struct bkey 
*bch_btree_iter_init(struct btree_keys *, struct btree_iter *, +				 struct bkey *); + +struct bkey *__bch_bset_search(struct btree_keys *, struct bset_tree *, +			       const struct bkey *); + +/* + * Returns the first key that is strictly greater than search + */ +static inline struct bkey *bch_bset_search(struct btree_keys *b, +					   struct bset_tree *t, +					   const struct bkey *search)  { -	l->top = (void *) (l->list = l->d); +	return search ? __bch_bset_search(b, t, search) : t->data->start;  } -static inline void bch_keylist_push(struct keylist *l) +#define for_each_key_filter(b, k, iter, filter)				\ +	for (bch_btree_iter_init((b), (iter), NULL);			\ +	     ((k) = bch_btree_iter_next_filter((iter), (b), filter));) + +#define for_each_key(b, k, iter)					\ +	for (bch_btree_iter_init((b), (iter), NULL);			\ +	     ((k) = bch_btree_iter_next(iter));) + +/* Sorting */ + +struct bset_sort_state { +	mempool_t		*pool; + +	unsigned		page_order; +	unsigned		crit_factor; + +	struct time_stats	time; +}; + +void bch_bset_sort_state_free(struct bset_sort_state *); +int bch_bset_sort_state_init(struct bset_sort_state *, unsigned); +void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *); +void bch_btree_sort_into(struct btree_keys *, struct btree_keys *, +			 struct bset_sort_state *); +void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *, +				    struct bset_sort_state *); +void bch_btree_sort_partial(struct btree_keys *, unsigned, +			    struct bset_sort_state *); + +static inline void bch_btree_sort(struct btree_keys *b, +				  struct bset_sort_state *state)  { -	l->top = bkey_next(l->top); +	bch_btree_sort_partial(b, 0, state);  } -static inline void bch_keylist_add(struct keylist *l, struct bkey *k) +struct bset_stats { +	size_t sets_written, sets_unwritten; +	size_t bytes_written, bytes_unwritten; +	size_t floats, failed; +}; + +void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *); + +/* Bkey utility code */ + +#define bset_bkey_last(i)	bkey_idx((struct bkey *) (i)->d, (i)->keys) + +static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)  { -	bkey_copy(l->top, k); -	bch_keylist_push(l); +	return bkey_idx(i->start, idx);  } -static inline bool bch_keylist_empty(struct keylist *l) +static inline void bkey_init(struct bkey *k)  { -	return l->top == (void *) l->list; +	*k = ZERO_KEY;  } -static inline void bch_keylist_free(struct keylist *l) +static __always_inline int64_t bkey_cmp(const struct bkey *l, +					const struct bkey *r)  { -	if (l->list != l->d) -		kfree(l->list); +	return unlikely(KEY_INODE(l) != KEY_INODE(r)) +		? 
(int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r) +		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);  } -void bch_keylist_copy(struct keylist *, struct keylist *); -struct bkey *bch_keylist_pop(struct keylist *); -int bch_keylist_realloc(struct keylist *, int, struct cache_set *); -  void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,  			      unsigned);  bool __bch_cut_front(const struct bkey *, struct bkey *); @@ -286,98 +417,150 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)  	return __bch_cut_back(where, k);  } -const char *bch_ptr_status(struct cache_set *, const struct bkey *); -bool __bch_ptr_invalid(struct cache_set *, int level, const struct bkey *); -bool bch_ptr_bad(struct btree *, const struct bkey *); - -static inline uint8_t gen_after(uint8_t a, uint8_t b) +#define PRECEDING_KEY(_k)					\ +({								\ +	struct bkey *_ret = NULL;				\ +								\ +	if (KEY_INODE(_k) || KEY_OFFSET(_k)) {			\ +		_ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);	\ +								\ +		if (!_ret->low)					\ +			_ret->high--;				\ +		_ret->low--;					\ +	}							\ +								\ +	_ret;							\ +}) + +static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)  { -	uint8_t r = a - b; -	return r > 128U ? 0 : r; +	return b->ops->key_invalid(b, k);  } -static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k, -				unsigned i) +static inline bool bch_ptr_bad(struct btree_keys *b, const struct bkey *k)  { -	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i)); +	return b->ops->key_bad(b, k);  } -static inline bool ptr_available(struct cache_set *c, const struct bkey *k, -				 unsigned i) +static inline void bch_bkey_to_text(struct btree_keys *b, char *buf, +				    size_t size, const struct bkey *k)  { -	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i); +	return b->ops->key_to_text(buf, size, k);  } +static inline bool bch_bkey_equal_header(const struct bkey *l, +					 const struct bkey *r) +{ +	return (KEY_DIRTY(l) == KEY_DIRTY(r) && +		KEY_PTRS(l) == KEY_PTRS(r) && +		KEY_CSUM(l) == KEY_CSUM(l)); +} -typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *); +/* Keylists */ -struct bkey *bch_next_recurse_key(struct btree *, struct bkey *); -struct bkey *bch_btree_iter_next(struct btree_iter *); -struct bkey *bch_btree_iter_next_filter(struct btree_iter *, -					struct btree *, ptr_filter_fn); +struct keylist { +	union { +		struct bkey		*keys; +		uint64_t		*keys_p; +	}; +	union { +		struct bkey		*top; +		uint64_t		*top_p; +	}; -void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *); -struct bkey *__bch_btree_iter_init(struct btree *, struct btree_iter *, -				   struct bkey *, struct bset_tree *); +	/* Enough room for btree_split's keys without realloc */ +#define KEYLIST_INLINE		16 +	uint64_t		inline_keys[KEYLIST_INLINE]; +}; -/* 32 bits total: */ -#define BKEY_MID_BITS		3 -#define BKEY_EXPONENT_BITS	7 -#define BKEY_MANTISSA_BITS	22 -#define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1) +static inline void bch_keylist_init(struct keylist *l) +{ +	l->top_p = l->keys_p = l->inline_keys; +} -struct bkey_float { -	unsigned	exponent:BKEY_EXPONENT_BITS; -	unsigned	m:BKEY_MID_BITS; -	unsigned	mantissa:BKEY_MANTISSA_BITS; -} __packed; +static inline void bch_keylist_init_single(struct keylist *l, struct bkey *k) +{ +	l->keys = k; +	l->top = bkey_next(k); +} -/* - * BSET_CACHELINE was originally intended to match the hardware cacheline size - - * it used to be 64, but I realized the 
lookup code would touch slightly less - * memory if it was 128. - * - * It definites the number of bytes (in struct bset) per struct bkey_float in - * the auxiliar search tree - when we're done searching the bset_float tree we - * have this many bytes left that we do a linear search over. - * - * Since (after level 5) every level of the bset_tree is on a new cacheline, - * we're touching one fewer cacheline in the bset tree in exchange for one more - * cacheline in the linear search - but the linear search might stop before it - * gets to the second cacheline. - */ +static inline void bch_keylist_push(struct keylist *l) +{ +	l->top = bkey_next(l->top); +} -#define BSET_CACHELINE		128 -#define bset_tree_space(b)	(btree_data_space(b) / BSET_CACHELINE) +static inline void bch_keylist_add(struct keylist *l, struct bkey *k) +{ +	bkey_copy(l->top, k); +	bch_keylist_push(l); +} -#define bset_tree_bytes(b)	(bset_tree_space(b) * sizeof(struct bkey_float)) -#define bset_prev_bytes(b)	(bset_tree_space(b) * sizeof(uint8_t)) +static inline bool bch_keylist_empty(struct keylist *l) +{ +	return l->top == l->keys; +} -void bch_bset_init_next(struct btree *); +static inline void bch_keylist_reset(struct keylist *l) +{ +	l->top = l->keys; +} -void bch_bset_fix_invalidated_key(struct btree *, struct bkey *); -void bch_bset_fix_lookup_table(struct btree *, struct bkey *); +static inline void bch_keylist_free(struct keylist *l) +{ +	if (l->keys_p != l->inline_keys) +		kfree(l->keys_p); +} -struct bkey *__bch_bset_search(struct btree *, struct bset_tree *, -			   const struct bkey *); +static inline size_t bch_keylist_nkeys(struct keylist *l) +{ +	return l->top_p - l->keys_p; +} -static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t, -					   const struct bkey *search) +static inline size_t bch_keylist_bytes(struct keylist *l)  { -	return search ? __bch_bset_search(b, t, search) : t->data->start; +	return bch_keylist_nkeys(l) * sizeof(uint64_t);  } -bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *); -void bch_btree_sort_lazy(struct btree *); -void bch_btree_sort_into(struct btree *, struct btree *); -void bch_btree_sort_and_fix_extents(struct btree *, struct btree_iter *); -void bch_btree_sort_partial(struct btree *, unsigned); +struct bkey *bch_keylist_pop(struct keylist *); +void bch_keylist_pop_front(struct keylist *); +int __bch_keylist_realloc(struct keylist *, unsigned); + +/* Debug stuff */ + +#ifdef CONFIG_BCACHE_DEBUG + +int __bch_count_data(struct btree_keys *); +void __bch_check_keys(struct btree_keys *, const char *, ...); +void bch_dump_bset(struct btree_keys *, struct bset *, unsigned); +void bch_dump_bucket(struct btree_keys *); + +#else + +static inline int __bch_count_data(struct btree_keys *b) { return -1; } +static inline void __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {} +static inline void bch_dump_bucket(struct btree_keys *b) {} +void bch_dump_bset(struct btree_keys *, struct bset *, unsigned); + +#endif + +static inline bool btree_keys_expensive_checks(struct btree_keys *b) +{ +#ifdef CONFIG_BCACHE_DEBUG +	return *b->expensive_debug_checks; +#else +	return false; +#endif +} -static inline void bch_btree_sort(struct btree *b) +static inline int bch_count_data(struct btree_keys *b)  { -	bch_btree_sort_partial(b, 0); +	return btree_keys_expensive_checks(b) ? __bch_count_data(b) : -1;  } -int bch_bset_print_stats(struct cache_set *, char *); +#define bch_check_keys(b, ...)						
\ +do {									\ +	if (btree_keys_expensive_checks(b))				\ +		__bch_check_keys(b, __VA_ARGS__);			\ +} while (0)  #endif diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index f9764e61978..7347b610096 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -23,12 +23,13 @@  #include "bcache.h"  #include "btree.h"  #include "debug.h" -#include "request.h" -#include "writeback.h" +#include "extents.h"  #include <linux/slab.h>  #include <linux/bitops.h> +#include <linux/freezer.h>  #include <linux/hash.h> +#include <linux/kthread.h>  #include <linux/prefetch.h>  #include <linux/random.h>  #include <linux/rcupdate.h> @@ -67,15 +68,11 @@   * alloc_bucket() cannot fail. This should be true but is not completely   * obvious.   * - * Make sure all allocations get charged to the root cgroup - *   * Plugging?   *   * If data write is less than hard sector size of ssd, round up offset in open   * bucket to the next whole sector   * - * Also lookup by cgroup in get_open_bucket() - *   * Superblock needs to be fleshed out for multiple cache devices   *   * Add a sysfs tunable for the number of writeback IOs in flight @@ -88,15 +85,6 @@   * Test module load/unload   */ -static const char * const op_types[] = { -	"insert", "replace" -}; - -static const char *op_type(struct btree_op *op) -{ -	return op_types[op->type]; -} -  #define MAX_NEED_GC		64  #define MAX_SAVE_PRIO		72 @@ -105,23 +93,96 @@ static const char *op_type(struct btree_op *op)  #define PTR_HASH(c, k)							\  	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0)) -struct workqueue_struct *bch_gc_wq; -static struct workqueue_struct *btree_io_wq; +#define insert_lock(s, b)	((b)->level <= (s)->lock) + +/* + * These macros are for recursing down the btree - they handle the details of + * locking and looking up nodes in the cache for you. They're best treated as + * mere syntax when reading code that uses them. + * + * op->lock determines whether we take a read or a write lock at a given depth. + * If you've got a read lock and find that you need a write lock (i.e. you're + * going to have to split), set op->lock and return -EINTR; btree_root() will + * call you again and you'll have the correct lock. + */ + +/** + * btree - recurse down the btree on a specified key + * @fn:		function to call, which will be passed the child node + * @key:	key to recurse on + * @b:		parent btree node + * @op:		pointer to struct btree_op + */ +#define btree(fn, key, b, op, ...)					\ +({									\ +	int _r, l = (b)->level - 1;					\ +	bool _w = l <= (op)->lock;					\ +	struct btree *_child = bch_btree_node_get((b)->c, op, key, l, _w);\ +	if (!IS_ERR(_child)) {						\ +		_child->parent = (b);					\ +		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\ +		rw_unlock(_w, _child);					\ +	} else								\ +		_r = PTR_ERR(_child);					\ +	_r;								\ +}) + +/** + * btree_root - call a function on the root of the btree + * @fn:		function to call, which will be passed the child node + * @c:		cache set + * @op:		pointer to struct btree_op + */ +#define btree_root(fn, c, op, ...)					
\ +({									\ +	int _r = -EINTR;						\ +	do {								\ +		struct btree *_b = (c)->root;				\ +		bool _w = insert_lock(op, _b);				\ +		rw_lock(_w, _b, _b->level);				\ +		if (_b == (c)->root &&					\ +		    _w == insert_lock(op, _b)) {			\ +			_b->parent = NULL;				\ +			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\ +		}							\ +		rw_unlock(_w, _b);					\ +		bch_cannibalize_unlock(c);				\ +		if (_r == -EINTR)					\ +			schedule();					\ +	} while (_r == -EINTR);						\ +									\ +	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\ +	_r;								\ +}) + +static inline struct bset *write_block(struct btree *b) +{ +	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c); +} + +static void bch_btree_init_next(struct btree *b) +{ +	/* If not a leaf node, always sort */ +	if (b->level && b->keys.nsets) +		bch_btree_sort(&b->keys, &b->c->sort); +	else +		bch_btree_sort_lazy(&b->keys, &b->c->sort); + +	if (b->written < btree_blocks(b)) +		bch_bset_init_next(&b->keys, write_block(b), +				   bset_magic(&b->c->sb)); -void bch_btree_op_init_stack(struct btree_op *op) -{ -	memset(op, 0, sizeof(struct btree_op)); -	closure_init_stack(&op->cl); -	op->lock = -1; -	bch_keylist_init(&op->keys);  }  /* Btree key manipulation */ -static void bkey_put(struct cache_set *c, struct bkey *k, int level) +void bkey_put(struct cache_set *c, struct bkey *k)  { -	if ((level && KEY_OFFSET(k)) || !level) -		__bkey_put(c, k); +	unsigned i; + +	for (i = 0; i < KEY_PTRS(k); i++) +		if (ptr_available(c, k, i)) +			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);  }  /* Btree IO */ @@ -129,38 +190,43 @@ static void bkey_put(struct cache_set *c, struct bkey *k, int level)  static uint64_t btree_csum_set(struct btree *b, struct bset *i)  {  	uint64_t crc = b->key.ptr[0]; -	void *data = (void *) i + 8, *end = end(i); +	void *data = (void *) i + 8, *end = bset_bkey_last(i);  	crc = bch_crc64_update(crc, data, end - data);  	return crc ^ 0xffffffffffffffffULL;  } -static void bch_btree_node_read_done(struct btree *b) +void bch_btree_node_read_done(struct btree *b)  {  	const char *err = "bad btree header"; -	struct bset *i = b->sets[0].data; +	struct bset *i = btree_bset_first(b);  	struct btree_iter *iter;  	iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);  	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;  	iter->used = 0; +#ifdef CONFIG_BCACHE_DEBUG +	iter->b = &b->keys; +#endif +  	if (!i->seq)  		goto err;  	for (; -	     b->written < btree_blocks(b) && i->seq == b->sets[0].data->seq; +	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;  	     i = write_block(b)) {  		err = "unsupported bset version";  		if (i->version > BCACHE_BSET_VERSION)  			goto err;  		err = "bad btree header"; -		if (b->written + set_blocks(i, b->c) > btree_blocks(b)) +		if (b->written + set_blocks(i, block_bytes(b->c)) > +		    btree_blocks(b))  			goto err;  		err = "bad magic"; -		if (i->magic != bset_magic(b->c)) +		if (i->magic != bset_magic(&b->c->sb))  			goto err;  		err = "bad checksum"; @@ -176,39 +242,40 @@ static void bch_btree_node_read_done(struct btree *b)  		}  		err = "empty set"; -		if (i != b->sets[0].data && !i->keys) +		if (i != b->keys.set[0].data && !i->keys)  			goto err; -		bch_btree_iter_push(iter, i->start, end(i)); +		bch_btree_iter_push(iter, i->start, bset_bkey_last(i)); -		b->written += set_blocks(i, b->c); +		b->written += set_blocks(i, block_bytes(b->c));  	}  	err = "corrupted btree";  	for (i = write_block(b); -	     index(i, b) < btree_blocks(b); +	     
bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);  	     i = ((void *) i) + block_bytes(b->c)) -		if (i->seq == b->sets[0].data->seq) +		if (i->seq == b->keys.set[0].data->seq)  			goto err; -	bch_btree_sort_and_fix_extents(b, iter); +	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); -	i = b->sets[0].data; +	i = b->keys.set[0].data;  	err = "short btree key"; -	if (b->sets[0].size && -	    bkey_cmp(&b->key, &b->sets[0].end) < 0) +	if (b->keys.set[0].size && +	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)  		goto err;  	if (b->written < btree_blocks(b)) -		bch_bset_init_next(b); +		bch_bset_init_next(&b->keys, write_block(b), +				   bset_magic(&b->c->sb));  out:  	mempool_free(iter, b->c->fill_iter);  	return;  err:  	set_btree_node_io_error(b); -	bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys", +	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",  			    err, PTR_BUCKET_NR(b->c, &b->key, 0), -			    index(i, b), i->keys); +			    bset_block_offset(b, i), i->keys);  	goto out;  } @@ -218,7 +285,7 @@ static void btree_node_read_endio(struct bio *bio, int error)  	closure_put(cl);  } -void bch_btree_node_read(struct btree *b) +static void bch_btree_node_read(struct btree *b)  {  	uint64_t start_time = local_clock();  	struct closure cl; @@ -230,11 +297,11 @@ void bch_btree_node_read(struct btree *b)  	bio = bch_bbio_alloc(b->c);  	bio->bi_rw	= REQ_META|READ_SYNC; -	bio->bi_size	= KEY_SIZE(&b->key) << 9; +	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;  	bio->bi_end_io	= btree_node_read_endio;  	bio->bi_private	= &cl; -	bch_bio_map(bio, b->sets[0].data); +	bch_bio_map(bio, b->keys.set[0].data);  	bch_submit_bbio(bio, b->c, &b->key, 0);  	closure_sync(&cl); @@ -248,14 +315,11 @@ void bch_btree_node_read(struct btree *b)  		goto err;  	bch_btree_node_read_done(b); - -	spin_lock(&b->c->btree_read_time_lock);  	bch_time_stats_update(&b->c->btree_read_time, start_time); -	spin_unlock(&b->c->btree_read_time_lock);  	return;  err: -	bch_cache_set_error(b->c, "io error reading bucket %lu", +	bch_cache_set_error(b->c, "io error reading bucket %zu",  			    PTR_BUCKET_NR(b->c, &b->key, 0));  } @@ -274,9 +338,16 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)  	w->journal	= NULL;  } +static void btree_node_write_unlock(struct closure *cl) +{ +	struct btree *b = container_of(cl, struct btree, io); + +	up(&b->io_mutex); +} +  static void __btree_node_write_done(struct closure *cl)  { -	struct btree *b = container_of(cl, struct btree, io.cl); +	struct btree *b = container_of(cl, struct btree, io);  	struct btree_write *w = btree_prev_write(b);  	bch_bbio_free(b->bio, b->c); @@ -284,19 +355,18 @@ static void __btree_node_write_done(struct closure *cl)  	btree_complete_write(b, w);  	if (btree_node_dirty(b)) -		queue_delayed_work(btree_io_wq, &b->work, -				   msecs_to_jiffies(30000)); +		schedule_delayed_work(&b->work, 30 * HZ); -	closure_return(cl); +	closure_return_with_destructor(cl, btree_node_write_unlock);  }  static void btree_node_write_done(struct closure *cl)  { -	struct btree *b = container_of(cl, struct btree, io.cl); +	struct btree *b = container_of(cl, struct btree, io);  	struct bio_vec *bv;  	int n; -	__bio_for_each_segment(bv, b->bio, n, 0) +	bio_for_each_segment_all(bv, b->bio, n)  		__free_page(bv->bv_page);  	__btree_node_write_done(cl); @@ -305,7 +375,7 @@ static void btree_node_write_done(struct closure *cl)  static void btree_node_write_endio(struct bio *bio, int error)  {  	struct closure *cl = 
bio->bi_private; -	struct btree *b = container_of(cl, struct btree, io.cl); +	struct btree *b = container_of(cl, struct btree, io);  	if (error)  		set_btree_node_io_error(b); @@ -316,8 +386,8 @@ static void btree_node_write_endio(struct bio *bio, int error)  static void do_btree_node_write(struct btree *b)  { -	struct closure *cl = &b->io.cl; -	struct bset *i = b->sets[b->nsets].data; +	struct closure *cl = &b->io; +	struct bset *i = btree_bset_last(b);  	BKEY_PADDED(key) k;  	i->version	= BCACHE_BSET_VERSION; @@ -327,9 +397,9 @@ static void do_btree_node_write(struct btree *b)  	b->bio = bch_bbio_alloc(b->c);  	b->bio->bi_end_io	= btree_node_write_endio; -	b->bio->bi_private	= &b->io.cl; +	b->bio->bi_private	= cl;  	b->bio->bi_rw		= REQ_META|WRITE_SYNC|REQ_FUA; -	b->bio->bi_size		= set_blocks(i, b->c) * block_bytes(b->c); +	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));  	bch_bio_map(b->bio, i);  	/* @@ -348,14 +418,15 @@ static void do_btree_node_write(struct btree *b)  	 */  	bkey_copy(&k.key, &b->key); -	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i)); +	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + +		       bset_sector_offset(&b->keys, i));  	if (!bio_alloc_pages(b->bio, GFP_NOIO)) {  		int j;  		struct bio_vec *bv;  		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); -		bio_for_each_segment(bv, b->bio, j) +		bio_for_each_segment_all(bv, b->bio, j)  			memcpy(page_address(bv->bv_page),  			       base + j * PAGE_SIZE, PAGE_SIZE); @@ -369,75 +440,106 @@ static void do_btree_node_write(struct btree *b)  		bch_submit_bbio(b->bio, b->c, &k.key, 0);  		closure_sync(cl); -		__btree_node_write_done(cl); +		continue_at_nobarrier(cl, __btree_node_write_done, NULL);  	}  } -void bch_btree_node_write(struct btree *b, struct closure *parent) +void __bch_btree_node_write(struct btree *b, struct closure *parent)  { -	struct bset *i = b->sets[b->nsets].data; +	struct bset *i = btree_bset_last(b); + +	lockdep_assert_held(&b->write_lock);  	trace_bcache_btree_write(b);  	BUG_ON(current->bio_list);  	BUG_ON(b->written >= btree_blocks(b));  	BUG_ON(b->written && !i->keys); -	BUG_ON(b->sets->data->seq != i->seq); -	bch_check_key_order(b, i); +	BUG_ON(btree_bset_first(b)->seq != i->seq); +	bch_check_keys(&b->keys, "writing");  	cancel_delayed_work(&b->work);  	/* If caller isn't waiting for write, parent refcount is cache set */ -	closure_lock(&b->io, parent ?: &b->c->cl); +	down(&b->io_mutex); +	closure_init(&b->io, parent ?: &b->c->cl);  	clear_bit(BTREE_NODE_dirty,	 &b->flags);  	change_bit(BTREE_NODE_write_idx, &b->flags);  	do_btree_node_write(b); -	b->written += set_blocks(i, b->c); -	atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size, +	atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,  			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written); -	bch_btree_sort_lazy(b); +	b->written += set_blocks(i, block_bytes(b->c)); +} -	if (b->written < btree_blocks(b)) -		bch_bset_init_next(b); +void bch_btree_node_write(struct btree *b, struct closure *parent) +{ +	unsigned nsets = b->keys.nsets; + +	lockdep_assert_held(&b->lock); + +	__bch_btree_node_write(b, parent); + +	/* +	 * do verify if there was more than one set initially (i.e. 
we did a +	 * sort) and we sorted down to a single set: +	 */ +	if (nsets && !b->keys.nsets) +		bch_btree_verify(b); + +	bch_btree_init_next(b); +} + +static void bch_btree_node_write_sync(struct btree *b) +{ +	struct closure cl; + +	closure_init_stack(&cl); + +	mutex_lock(&b->write_lock); +	bch_btree_node_write(b, &cl); +	mutex_unlock(&b->write_lock); + +	closure_sync(&cl);  }  static void btree_node_write_work(struct work_struct *w)  {  	struct btree *b = container_of(to_delayed_work(w), struct btree, work); -	rw_lock(true, b, b->level); - +	mutex_lock(&b->write_lock);  	if (btree_node_dirty(b)) -		bch_btree_node_write(b, NULL); -	rw_unlock(true, b); +		__bch_btree_node_write(b, NULL); +	mutex_unlock(&b->write_lock);  } -static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op) +static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)  { -	struct bset *i = b->sets[b->nsets].data; +	struct bset *i = btree_bset_last(b);  	struct btree_write *w = btree_current_write(b); +	lockdep_assert_held(&b->write_lock); +  	BUG_ON(!b->written);  	BUG_ON(!i->keys);  	if (!btree_node_dirty(b)) -		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); +		schedule_delayed_work(&b->work, 30 * HZ);  	set_btree_node_dirty(b); -	if (op && op->journal) { +	if (journal_ref) {  		if (w->journal && -		    journal_pin_cmp(b->c, w, op)) { +		    journal_pin_cmp(b->c, w->journal, journal_ref)) {  			atomic_dec_bug(w->journal);  			w->journal = NULL;  		}  		if (!w->journal) { -			w->journal = op->journal; +			w->journal = journal_ref;  			atomic_inc(w->journal);  		}  	} @@ -453,53 +555,19 @@ static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)   * mca -> memory cache   */ -static void mca_reinit(struct btree *b) -{ -	unsigned i; - -	b->flags	= 0; -	b->written	= 0; -	b->nsets	= 0; - -	for (i = 0; i < MAX_BSETS; i++) -		b->sets[i].size = 0; -	/* -	 * Second loop starts at 1 because b->sets[0]->data is the memory we -	 * allocated -	 */ -	for (i = 1; i < MAX_BSETS; i++) -		b->sets[i].data = NULL; -} -  #define mca_reserve(c)	(((c->root && c->root->level)		\  			  ? c->root->level : 1) * 8 + 16)  #define mca_can_free(c)						\ -	max_t(int, 0, c->bucket_cache_used - mca_reserve(c)) +	max_t(int, 0, c->btree_cache_used - mca_reserve(c))  static void mca_data_free(struct btree *b)  { -	struct bset_tree *t = b->sets; -	BUG_ON(!closure_is_unlocked(&b->io.cl)); - -	if (bset_prev_bytes(b) < PAGE_SIZE) -		kfree(t->prev); -	else -		free_pages((unsigned long) t->prev, -			   get_order(bset_prev_bytes(b))); - -	if (bset_tree_bytes(b) < PAGE_SIZE) -		kfree(t->tree); -	else -		free_pages((unsigned long) t->tree, -			   get_order(bset_tree_bytes(b))); +	BUG_ON(b->io_mutex.count != 1); -	free_pages((unsigned long) t->data, b->page_order); +	bch_btree_keys_free(&b->keys); -	t->prev = NULL; -	t->tree = NULL; -	t->data = NULL; +	b->c->btree_cache_used--;  	list_move(&b->list, &b->c->btree_cache_freed); -	b->c->bucket_cache_used--;  }  static void mca_bucket_free(struct btree *b) @@ -518,34 +586,16 @@ static unsigned btree_order(struct bkey *k)  static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)  { -	struct bset_tree *t = b->sets; -	BUG_ON(t->data); - -	b->page_order = max_t(unsigned, -			      ilog2(b->c->btree_pages), -			      btree_order(k)); - -	t->data = (void *) __get_free_pages(gfp, b->page_order); -	if (!t->data) -		goto err; - -	t->tree = bset_tree_bytes(b) < PAGE_SIZE -		? 
kmalloc(bset_tree_bytes(b), gfp) -		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b))); -	if (!t->tree) -		goto err; - -	t->prev = bset_prev_bytes(b) < PAGE_SIZE -		? kmalloc(bset_prev_bytes(b), gfp) -		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b))); -	if (!t->prev) -		goto err; - -	list_move(&b->list, &b->c->btree_cache); -	b->c->bucket_cache_used++; -	return; -err: -	mca_data_free(b); +	if (!bch_btree_keys_alloc(&b->keys, +				  max_t(unsigned, +					ilog2(b->c->btree_pages), +					btree_order(k)), +				  gfp)) { +		b->c->btree_cache_used++; +		list_move(&b->list, &b->c->btree_cache); +	} else { +		list_move(&b->list, &b->c->btree_cache_freed); +	}  }  static struct btree *mca_bucket_alloc(struct cache_set *c, @@ -557,44 +607,56 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,  	init_rwsem(&b->lock);  	lockdep_set_novalidate_class(&b->lock); +	mutex_init(&b->write_lock); +	lockdep_set_novalidate_class(&b->write_lock);  	INIT_LIST_HEAD(&b->list);  	INIT_DELAYED_WORK(&b->work, btree_node_write_work);  	b->c = c; -	closure_init_unlocked(&b->io); +	sema_init(&b->io_mutex, 1);  	mca_data_alloc(b, k, gfp);  	return b;  } -static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order) +static int mca_reap(struct btree *b, unsigned min_order, bool flush)  { +	struct closure cl; + +	closure_init_stack(&cl);  	lockdep_assert_held(&b->c->bucket_lock);  	if (!down_write_trylock(&b->lock))  		return -ENOMEM; -	if (b->page_order < min_order) { -		rw_unlock(true, b); -		return -ENOMEM; -	} +	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data); -	BUG_ON(btree_node_dirty(b) && !b->sets[0].data); +	if (b->keys.page_order < min_order) +		goto out_unlock; -	if (cl && btree_node_dirty(b)) -		bch_btree_node_write(b, NULL); - -	if (cl) -		closure_wait_event_async(&b->io.wait, cl, -			 atomic_read(&b->io.cl.remaining) == -1); +	if (!flush) { +		if (btree_node_dirty(b)) +			goto out_unlock; -	if (btree_node_dirty(b) || -	    !closure_is_unlocked(&b->io.cl) || -	    work_pending(&b->work.work)) { -		rw_unlock(true, b); -		return -EAGAIN; +		if (down_trylock(&b->io_mutex)) +			goto out_unlock; +		up(&b->io_mutex);  	} +	mutex_lock(&b->write_lock); +	if (btree_node_dirty(b)) +		__bch_btree_node_write(b, &cl); +	mutex_unlock(&b->write_lock); + +	closure_sync(&cl); + +	/* wait for any in flight btree write */ +	down(&b->io_mutex); +	up(&b->io_mutex); +  	return 0; +out_unlock: +	rw_unlock(true, b); +	return -ENOMEM;  }  static unsigned long bch_mca_scan(struct shrinker *shrink, @@ -608,11 +670,11 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,  	if (c->shrinker_disabled)  		return SHRINK_STOP; -	if (c->try_harder) +	if (c->btree_cache_alloc_lock)  		return SHRINK_STOP;  	/* Return -1 if we can't do anything right now */ -	if (sc->gfp_mask & __GFP_WAIT) +	if (sc->gfp_mask & __GFP_IO)  		mutex_lock(&c->bucket_lock);  	else if (!mutex_trylock(&c->bucket_lock))  		return -1; @@ -633,26 +695,22 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,  			break;  		if (++i > 3 && -		    !mca_reap(b, NULL, 0)) { +		    !mca_reap(b, 0, false)) {  			mca_data_free(b);  			rw_unlock(true, b);  			freed++;  		}  	} -	/* -	 * Can happen right when we first start up, before we've read in any -	 * btree nodes -	 */ -	if (list_empty(&c->btree_cache)) -		goto out; +	for (i = 0; (nr--) && i < c->btree_cache_used; i++) { +		if (list_empty(&c->btree_cache)) +			goto out; -	for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {  		b = 
list_first_entry(&c->btree_cache, struct btree, list);  		list_rotate_left(&c->btree_cache);  		if (!b->accessed && -		    !mca_reap(b, NULL, 0)) { +		    !mca_reap(b, 0, false)) {  			mca_bucket_free(b);  			mca_data_free(b);  			rw_unlock(true, b); @@ -673,7 +731,7 @@ static unsigned long bch_mca_count(struct shrinker *shrink,  	if (c->shrinker_disabled)  		return 0; -	if (c->try_harder) +	if (c->btree_cache_alloc_lock)  		return 0;  	return mca_can_free(c) * c->btree_pages; @@ -693,6 +751,8 @@ void bch_btree_cache_free(struct cache_set *c)  #ifdef CONFIG_BCACHE_DEBUG  	if (c->verify_data)  		list_move(&c->verify_data->list, &c->btree_cache); + +	free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));  #endif  	list_splice(&c->btree_cache_freeable, @@ -723,12 +783,9 @@ int bch_btree_cache_alloc(struct cache_set *c)  {  	unsigned i; -	/* XXX: doesn't check for errors */ - -	closure_init_unlocked(&c->gc); -  	for (i = 0; i < mca_reserve(c); i++) -		mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); +		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL)) +			return -ENOMEM;  	list_splice_init(&c->btree_cache,  			 &c->btree_cache_freeable); @@ -736,10 +793,13 @@ int bch_btree_cache_alloc(struct cache_set *c)  #ifdef CONFIG_BCACHE_DEBUG  	mutex_init(&c->verify_lock); +	c->verify_ondisk = (void *) +		__get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c))); +  	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);  	if (c->verify_data && -	    c->verify_data->sets[0].data) +	    c->verify_data->keys.set->data)  		list_del_init(&c->verify_data->list);  	else  		c->verify_data = NULL; @@ -775,52 +835,41 @@ out:  	return b;  } -static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k, -				     int level, struct closure *cl) +static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)  { -	int ret = -ENOMEM; -	struct btree *i; +	struct task_struct *old; -	trace_bcache_btree_cache_cannibalize(c); +	old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current); +	if (old && old != current) { +		if (op) +			prepare_to_wait(&c->btree_cache_wait, &op->wait, +					TASK_UNINTERRUPTIBLE); +		return -EINTR; +	} + +	return 0; +} -	if (!cl) -		return ERR_PTR(-ENOMEM); +static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, +				     struct bkey *k) +{ +	struct btree *b; -	/* -	 * Trying to free up some memory - i.e. reuse some btree nodes - may -	 * require initiating IO to flush the dirty part of the node. If we're -	 * running under generic_make_request(), that IO will never finish and -	 * we would deadlock. Returning -EAGAIN causes the cache lookup code to -	 * punt to workqueue and retry. 
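
mca_cannibalize_lock() above replaces the old try_harder closure machinery with a single-owner lock: one task wins a cmpxchg() on c->btree_cache_alloc_lock, and every other task parks itself on btree_cache_wait and backs out with -EINTR so it can drop its locks and retry from the top. A minimal userspace sketch of that pattern follows, using C11 atomics; the names and types are invented for illustration and this is not the kernel implementation.

#include <errno.h>
#include <stdatomic.h>
#include <stddef.h>

static _Atomic(void *) cache_alloc_lock;	/* NULL == unlocked, else the owner */

static int cannibalize_trylock(void *current_task)
{
	void *old = NULL;

	if (atomic_compare_exchange_strong(&cache_alloc_lock, &old, current_task))
		return 0;		/* we took the lock */
	if (old == current_task)
		return 0;		/* we already hold it: re-entry is fine */
	return -EINTR;			/* lose: unwind held locks, wait, retry */
}

static void cannibalize_unlock(void *current_task)
{
	void *old = current_task;

	/* only the owner clears the lock; the kernel code wakes waiters here */
	atomic_compare_exchange_strong(&cache_alloc_lock, &old, NULL);
}

int main(void)
{
	int me;				/* any per-task object works as an identity */

	if (!cannibalize_trylock(&me)) {
		/* ... reclaim btree nodes ... */
		cannibalize_unlock(&me);
	}
	return 0;
}

The matching unlock in the patch, bch_cannibalize_unlock() a little further down, clears the owner and wakes anyone parked on btree_cache_wait.
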
-	 */ -	if (current->bio_list) -		return ERR_PTR(-EAGAIN); +	trace_bcache_btree_cache_cannibalize(c); -	if (c->try_harder && c->try_harder != cl) { -		closure_wait_event_async(&c->try_wait, cl, !c->try_harder); -		return ERR_PTR(-EAGAIN); -	} +	if (mca_cannibalize_lock(c, op)) +		return ERR_PTR(-EINTR); -	c->try_harder = cl; -	c->try_harder_start = local_clock(); -retry: -	list_for_each_entry_reverse(i, &c->btree_cache, list) { -		int r = mca_reap(i, cl, btree_order(k)); -		if (!r) -			return i; -		if (r != -ENOMEM) -			ret = r; -	} +	list_for_each_entry_reverse(b, &c->btree_cache, list) +		if (!mca_reap(b, btree_order(k), false)) +			return b; -	if (ret == -EAGAIN && -	    closure_blocking(cl)) { -		mutex_unlock(&c->bucket_lock); -		closure_sync(cl); -		mutex_lock(&c->bucket_lock); -		goto retry; -	} +	list_for_each_entry_reverse(b, &c->btree_cache, list) +		if (!mca_reap(b, btree_order(k), true)) +			return b; -	return ERR_PTR(ret); +	WARN(1, "btree cache cannibalize failed\n"); +	return ERR_PTR(-ENOMEM);  }  /* @@ -829,20 +878,21 @@ retry:   * cannibalize_bucket() will take. This means every time we unlock the root of   * the btree, we need to release this lock if we have it held.   */ -void bch_cannibalize_unlock(struct cache_set *c, struct closure *cl) +static void bch_cannibalize_unlock(struct cache_set *c)  { -	if (c->try_harder == cl) { -		bch_time_stats_update(&c->try_harder_time, c->try_harder_start); -		c->try_harder = NULL; -		__closure_wake_up(&c->try_wait); +	if (c->btree_cache_alloc_lock == current) { +		c->btree_cache_alloc_lock = NULL; +		wake_up(&c->btree_cache_wait);  	}  } -static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, -			       int level, struct closure *cl) +static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, +			       struct bkey *k, int level)  {  	struct btree *b; +	BUG_ON(current->bio_list); +  	lockdep_assert_held(&c->bucket_lock);  	if (mca_find(c, k)) @@ -852,16 +902,16 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k,  	 * the list. Check if there's any freed nodes there:  	 */  	list_for_each_entry(b, &c->btree_cache_freeable, list) -		if (!mca_reap(b, NULL, btree_order(k))) +		if (!mca_reap(b, btree_order(k), false))  			goto out;  	/* We never free struct btree itself, just the memory that holds the on  	 * disk node. 
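
The sema_init(&b->io_mutex, 1) added in mca_bucket_alloc() and the down()/up() pair in mca_reap() earlier in this hunk replace the old embedded closure lock: the semaphore is held for the duration of a btree node write, so "is a write in flight?" becomes a down_trylock() and "wait for the write to finish" becomes acquire-then-release. A userspace sketch of the same idea with POSIX semaphores, names invented:

#include <semaphore.h>

static sem_t node_io_lock;		/* binary: 1 means no write in flight */

static void node_write_begin(void)	{ sem_wait(&node_io_lock); }
static void node_write_end(void)	{ sem_post(&node_io_lock); }

/* returns nonzero if a write is currently in flight */
static int node_write_in_flight(void)
{
	if (sem_trywait(&node_io_lock))
		return 1;		/* couldn't take it: a writer is active */
	sem_post(&node_io_lock);
	return 0;
}

/* block until whatever write is in flight (if any) has finished */
static void node_write_wait(void)
{
	sem_wait(&node_io_lock);
	sem_post(&node_io_lock);
}

int main(void)
{
	sem_init(&node_io_lock, 0, 1);	/* binary semaphore, initially free */
	node_write_begin();
	/* ... submit and complete the write ... */
	node_write_end();
	node_write_wait();
	return node_write_in_flight();	/* 0: nothing in flight */
}
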
Check the freed list before allocating a new one:  	 */  	list_for_each_entry(b, &c->btree_cache_freed, list) -		if (!mca_reap(b, NULL, 0)) { +		if (!mca_reap(b, 0, false)) {  			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO); -			if (!b->sets[0].data) +			if (!b->keys.set[0].data)  				goto err;  			else  				goto out; @@ -872,10 +922,10 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k,  		goto err;  	BUG_ON(!down_write_trylock(&b->lock)); -	if (!b->sets->data) +	if (!b->keys.set->data)  		goto err;  out: -	BUG_ON(!closure_is_unlocked(&b->io.cl)); +	BUG_ON(b->io_mutex.count != 1);  	bkey_copy(&b->key, k);  	list_move(&b->list, &c->btree_cache); @@ -883,16 +933,24 @@ out:  	hlist_add_head_rcu(&b->hash, mca_hash(c, k));  	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_); +	b->parent	= (void *) ~0UL; +	b->flags	= 0; +	b->written	= 0;  	b->level	= level; -	mca_reinit(b); +	if (!b->level) +		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops, +				    &b->c->expensive_debug_checks); +	else +		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops, +				    &b->c->expensive_debug_checks);  	return b;  err:  	if (b)  		rw_unlock(true, b); -	b = mca_cannibalize(c, k, level, cl); +	b = mca_cannibalize(c, op, k);  	if (!IS_ERR(b))  		goto out; @@ -903,17 +961,15 @@ err:   * bch_btree_node_get - find a btree node in the cache and lock it, reading it   * in from disk if necessary.   * - * If IO is necessary, it uses the closure embedded in struct btree_op to wait; - * if that closure is in non blocking mode, will return -EAGAIN. + * If IO is necessary and running under generic_make_request, returns -EAGAIN.   *   * The btree node will have either a read or a write lock held, depending on   * level and op->lock.   */ -struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k, -				 int level, struct btree_op *op) +struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, +				 struct bkey *k, int level, bool write)  {  	int i = 0; -	bool write = level <= op->lock;  	struct btree *b;  	BUG_ON(level < 0); @@ -925,7 +981,7 @@ retry:  			return ERR_PTR(-EAGAIN);  		mutex_lock(&c->bucket_lock); -		b = mca_alloc(c, k, level, &op->cl); +		b = mca_alloc(c, op, k, level);  		mutex_unlock(&c->bucket_lock);  		if (!b) @@ -948,13 +1004,13 @@ retry:  	b->accessed = 1; -	for (; i <= b->nsets && b->sets[i].size; i++) { -		prefetch(b->sets[i].tree); -		prefetch(b->sets[i].data); +	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { +		prefetch(b->keys.set[i].tree); +		prefetch(b->keys.set[i].data);  	} -	for (; i <= b->nsets; i++) -		prefetch(b->sets[i].data); +	for (; i <= b->keys.nsets; i++) +		prefetch(b->keys.set[i].data);  	if (btree_node_io_error(b)) {  		rw_unlock(write, b); @@ -971,7 +1027,7 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)  	struct btree *b;  	mutex_lock(&c->bucket_lock); -	b = mca_alloc(c, k, level, NULL); +	b = mca_alloc(c, NULL, k, level);  	mutex_unlock(&c->bucket_lock);  	if (!IS_ERR_OR_NULL(b)) { @@ -982,65 +1038,54 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)  /* Btree alloc */ -static void btree_node_free(struct btree *b, struct btree_op *op) +static void btree_node_free(struct btree *b)  { -	unsigned i; -  	trace_bcache_btree_node_free(b); -	/* -	 * The BUG_ON() in btree_node_get() implies that we must have a write -	 * lock on parent to free or even invalidate a node -	 */ -	BUG_ON(op->lock <= b->level);  	BUG_ON(b == b->c->root); +	
mutex_lock(&b->write_lock); +  	if (btree_node_dirty(b))  		btree_complete_write(b, btree_current_write(b));  	clear_bit(BTREE_NODE_dirty, &b->flags); +	mutex_unlock(&b->write_lock); +  	cancel_delayed_work(&b->work);  	mutex_lock(&b->c->bucket_lock); - -	for (i = 0; i < KEY_PTRS(&b->key); i++) { -		BUG_ON(atomic_read(&PTR_BUCKET(b->c, &b->key, i)->pin)); - -		bch_inc_gen(PTR_CACHE(b->c, &b->key, i), -			    PTR_BUCKET(b->c, &b->key, i)); -	} -  	bch_bucket_free(b->c, &b->key);  	mca_bucket_free(b);  	mutex_unlock(&b->c->bucket_lock);  } -struct btree *bch_btree_node_alloc(struct cache_set *c, int level, -				   struct closure *cl) +struct btree *bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, +				   int level)  {  	BKEY_PADDED(key) k;  	struct btree *b = ERR_PTR(-EAGAIN);  	mutex_lock(&c->bucket_lock);  retry: -	if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, cl)) +	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, op != NULL))  		goto err; +	bkey_put(c, &k.key);  	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS); -	b = mca_alloc(c, &k.key, level, cl); +	b = mca_alloc(c, op, &k.key, level);  	if (IS_ERR(b))  		goto err_free;  	if (!b) {  		cache_bug(c,  			"Tried to allocate bucket that was in btree cache"); -		__bkey_put(c, &k.key);  		goto retry;  	}  	b->accessed = 1; -	bch_bset_init_next(b); +	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));  	mutex_unlock(&c->bucket_lock); @@ -1048,7 +1093,6 @@ retry:  	return b;  err_free:  	bch_bucket_free(c, &k.key); -	__bkey_put(c, &k.key);  err:  	mutex_unlock(&c->bucket_lock); @@ -1057,18 +1101,64 @@ err:  }  static struct btree *btree_node_alloc_replacement(struct btree *b, -						  struct closure *cl) +						  struct btree_op *op)  { -	struct btree *n = bch_btree_node_alloc(b->c, b->level, cl); -	if (!IS_ERR_OR_NULL(n)) -		bch_btree_sort_into(b, n); +	struct btree *n = bch_btree_node_alloc(b->c, op, b->level); +	if (!IS_ERR_OR_NULL(n)) { +		mutex_lock(&n->write_lock); +		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); +		bkey_copy_key(&n->key, &b->key); +		mutex_unlock(&n->write_lock); +	}  	return n;  } +static void make_btree_freeing_key(struct btree *b, struct bkey *k) +{ +	unsigned i; + +	mutex_lock(&b->c->bucket_lock); + +	atomic_inc(&b->c->prio_blocked); + +	bkey_copy(k, &b->key); +	bkey_copy_key(k, &ZERO_KEY); + +	for (i = 0; i < KEY_PTRS(k); i++) +		SET_PTR_GEN(k, i, +			    bch_inc_gen(PTR_CACHE(b->c, &b->key, i), +					PTR_BUCKET(b->c, &b->key, i))); + +	mutex_unlock(&b->c->bucket_lock); +} + +static int btree_check_reserve(struct btree *b, struct btree_op *op) +{ +	struct cache_set *c = b->c; +	struct cache *ca; +	unsigned i, reserve = (c->root->level - b->level) * 2 + 1; + +	mutex_lock(&c->bucket_lock); + +	for_each_cache(ca, c, i) +		if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) { +			if (op) +				prepare_to_wait(&c->btree_cache_wait, &op->wait, +						TASK_UNINTERRUPTIBLE); +			mutex_unlock(&c->bucket_lock); +			return -EINTR; +		} + +	mutex_unlock(&c->bucket_lock); + +	return mca_cannibalize_lock(b->c, op); +} +  /* Garbage collection */ -uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k) +static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, +				    struct bkey *k)  {  	uint8_t stale = 0;  	unsigned i; @@ -1088,8 +1178,8 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)  		g = PTR_BUCKET(c, k, i); -		if (gen_after(g->gc_gen, PTR_GEN(k, i))) -			g->gc_gen = PTR_GEN(k, i); +		if 
(gen_after(g->last_gc, PTR_GEN(k, i))) +			g->last_gc = PTR_GEN(k, i);  		if (ptr_stale(c, k, i)) {  			stale = max(stale, ptr_stale(c, k, i)); @@ -1105,11 +1195,13 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)  			SET_GC_MARK(g, GC_MARK_METADATA);  		else if (KEY_DIRTY(k))  			SET_GC_MARK(g, GC_MARK_DIRTY); +		else if (!GC_MARK(g)) +			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);  		/* guard against overflow */  		SET_GC_SECTORS_USED(g, min_t(unsigned,  					     GC_SECTORS_USED(g) + KEY_SIZE(k), -					     (1 << 14) - 1)); +					     MAX_GC_SECTORS_USED));  		BUG_ON(!GC_SECTORS_USED(g));  	} @@ -1119,120 +1211,143 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)  #define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k) -static int btree_gc_mark_node(struct btree *b, unsigned *keys, -			      struct gc_stat *gc) +void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k) +{ +	unsigned i; + +	for (i = 0; i < KEY_PTRS(k); i++) +		if (ptr_available(c, k, i) && +		    !ptr_stale(c, k, i)) { +			struct bucket *b = PTR_BUCKET(c, k, i); + +			b->gen = PTR_GEN(k, i); + +			if (level && bkey_cmp(k, &ZERO_KEY)) +				b->prio = BTREE_PRIO; +			else if (!level && b->prio == BTREE_PRIO) +				b->prio = INITIAL_PRIO; +		} + +	__bch_btree_mark_key(c, level, k); +} + +static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)  {  	uint8_t stale = 0; -	unsigned last_dev = -1; -	struct bcache_device *d = NULL; +	unsigned keys = 0, good_keys = 0;  	struct bkey *k;  	struct btree_iter iter;  	struct bset_tree *t;  	gc->nodes++; -	for_each_key_filter(b, k, &iter, bch_ptr_invalid) { -		if (last_dev != KEY_INODE(k)) { -			last_dev = KEY_INODE(k); - -			d = KEY_INODE(k) < b->c->nr_uuids -				? b->c->devices[last_dev] -				: NULL; -		} - +	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {  		stale = max(stale, btree_mark_key(b, k)); +		keys++; -		if (bch_ptr_bad(b, k)) +		if (bch_ptr_bad(&b->keys, k))  			continue; -		*keys += bkey_u64s(k); -  		gc->key_bytes += bkey_u64s(k);  		gc->nkeys++; +		good_keys++;  		gc->data += KEY_SIZE(k); -		if (KEY_DIRTY(k)) -			gc->dirty += KEY_SIZE(k);  	} -	for (t = b->sets; t <= &b->sets[b->nsets]; t++) +	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)  		btree_bug_on(t->size && -			     bset_written(b, t) && +			     bset_written(&b->keys, t) &&  			     bkey_cmp(&b->key, &t->end) < 0,  			     b, "found short btree key in gc"); -	return stale; -} - -static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k, -				    struct btree_op *op) -{ -	/* -	 * We block priorities from being written for the duration of garbage -	 * collection, so we can't sleep in btree_alloc() -> -	 * bch_bucket_alloc_set(), or we'd risk deadlock - so we don't pass it -	 * our closure. -	 */ -	struct btree *n = btree_node_alloc_replacement(b, NULL); - -	if (!IS_ERR_OR_NULL(n)) { -		swap(b, n); -		__bkey_put(b->c, &b->key); +	if (b->c->gc_always_rewrite) +		return true; -		memcpy(k->ptr, b->key.ptr, -		       sizeof(uint64_t) * KEY_PTRS(&b->key)); +	if (stale > 10) +		return true; -		btree_node_free(n, op); -		up_write(&n->lock); -	} +	if ((keys - good_keys) * 2 > keys) +		return true; -	return b; +	return false;  } -/* - * Leaving this at 2 until we've got incremental garbage collection done; it - * could be higher (and has been tested with 4) except that garbage collection - * could take much longer, adversely affecting latency. 
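
btree_gc_mark_node() above now returns a should-rewrite verdict rather than a raw staleness count: a node gets rewritten if the cache set forces it (gc_always_rewrite), if any pointer is more than 10 generations stale, or if more than half of its keys are bad. Restated as a standalone predicate with simplified field names (illustration only, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

struct gc_node_stats {
	unsigned keys;		/* keys scanned in the node */
	unsigned good_keys;	/* keys that were not bad/stale */
	unsigned stale;		/* worst pointer staleness seen */
	bool always_rewrite;	/* debug knob: rewrite everything */
};

static bool should_rewrite_node(const struct gc_node_stats *s)
{
	if (s->always_rewrite)
		return true;
	if (s->stale > 10)
		return true;
	/* more than half the keys are garbage: compacting wins back space */
	return (s->keys - s->good_keys) * 2 > s->keys;
}

int main(void)
{
	struct gc_node_stats s = { .keys = 100, .good_keys = 40 };

	printf("rewrite: %d\n", should_rewrite_node(&s));	/* prints 1 */
	return 0;
}
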
- */ -#define GC_MERGE_NODES	2U +#define GC_MERGE_NODES	4U  struct gc_merge_info {  	struct btree	*b; -	struct bkey	*k;  	unsigned	keys;  }; -static void btree_gc_coalesce(struct btree *b, struct btree_op *op, -			      struct gc_stat *gc, struct gc_merge_info *r) +static int bch_btree_insert_node(struct btree *, struct btree_op *, +				 struct keylist *, atomic_t *, struct bkey *); + +static int btree_gc_coalesce(struct btree *b, struct btree_op *op, +			     struct gc_stat *gc, struct gc_merge_info *r)  { -	unsigned nodes = 0, keys = 0, blocks; -	int i; +	unsigned i, nodes = 0, keys = 0, blocks; +	struct btree *new_nodes[GC_MERGE_NODES]; +	struct keylist keylist; +	struct closure cl; +	struct bkey *k; + +	bch_keylist_init(&keylist); + +	if (btree_check_reserve(b, NULL)) +		return 0; + +	memset(new_nodes, 0, sizeof(new_nodes)); +	closure_init_stack(&cl); -	while (nodes < GC_MERGE_NODES && r[nodes].b) +	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))  		keys += r[nodes++].keys;  	blocks = btree_default_blocks(b->c) * 2 / 3;  	if (nodes < 2 || -	    __set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1)) -		return; - -	for (i = nodes - 1; i >= 0; --i) { -		if (r[i].b->written) -			r[i].b = btree_gc_alloc(r[i].b, r[i].k, op); +	    __set_blocks(b->keys.set[0].data, keys, +			 block_bytes(b->c)) > blocks * (nodes - 1)) +		return 0; -		if (r[i].b->written) -			return; +	for (i = 0; i < nodes; i++) { +		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL); +		if (IS_ERR_OR_NULL(new_nodes[i])) +			goto out_nocoalesce;  	} +	/* +	 * We have to check the reserve here, after we've allocated our new +	 * nodes, to make sure the insert below will succeed - we also check +	 * before as an optimization to potentially avoid a bunch of expensive +	 * allocs/sorts +	 */ +	if (btree_check_reserve(b, NULL)) +		goto out_nocoalesce; + +	for (i = 0; i < nodes; i++) +		mutex_lock(&new_nodes[i]->write_lock); +  	for (i = nodes - 1; i > 0; --i) { -		struct bset *n1 = r[i].b->sets->data; -		struct bset *n2 = r[i - 1].b->sets->data; +		struct bset *n1 = btree_bset_first(new_nodes[i]); +		struct bset *n2 = btree_bset_first(new_nodes[i - 1]);  		struct bkey *k, *last = NULL;  		keys = 0; -		if (i == 1) { +		if (i > 1) { +			for (k = n2->start; +			     k < bset_bkey_last(n2); +			     k = bkey_next(k)) { +				if (__set_blocks(n1, n1->keys + keys + +						 bkey_u64s(k), +						 block_bytes(b->c)) > blocks) +					break; + +				last = k; +				keys += bkey_u64s(k); +			} +		} else {  			/*  			 * Last node we're not getting rid of - we're getting  			 * rid of the node at r[0]. 
Have to try and fit all of @@ -1241,133 +1356,226 @@ static void btree_gc_coalesce(struct btree *b, struct btree_op *op,  			 * length keys (shouldn't be possible in practice,  			 * though)  			 */ -			if (__set_blocks(n1, n1->keys + r->keys, -					 b->c) > btree_blocks(r[i].b)) -				return; +			if (__set_blocks(n1, n1->keys + n2->keys, +					 block_bytes(b->c)) > +			    btree_blocks(new_nodes[i])) +				goto out_nocoalesce;  			keys = n2->keys; +			/* Take the key of the node we're getting rid of */  			last = &r->b->key; -		} else -			for (k = n2->start; -			     k < end(n2); -			     k = bkey_next(k)) { -				if (__set_blocks(n1, n1->keys + keys + -						 bkey_u64s(k), b->c) > blocks) -					break; - -				last = k; -				keys += bkey_u64s(k); -			} +		} -		BUG_ON(__set_blocks(n1, n1->keys + keys, -				    b->c) > btree_blocks(r[i].b)); +		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) > +		       btree_blocks(new_nodes[i])); -		if (last) { -			bkey_copy_key(&r[i].b->key, last); -			bkey_copy_key(r[i].k, last); -		} +		if (last) +			bkey_copy_key(&new_nodes[i]->key, last); -		memcpy(end(n1), +		memcpy(bset_bkey_last(n1),  		       n2->start, -		       (void *) node(n2, keys) - (void *) n2->start); +		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);  		n1->keys += keys; +		r[i].keys = n1->keys;  		memmove(n2->start, -			node(n2, keys), -			(void *) end(n2) - (void *) node(n2, keys)); +			bset_bkey_idx(n2, keys), +			(void *) bset_bkey_last(n2) - +			(void *) bset_bkey_idx(n2, keys));  		n2->keys -= keys; -		r[i].keys	= n1->keys; -		r[i - 1].keys	= n2->keys; +		if (__bch_keylist_realloc(&keylist, +					  bkey_u64s(&new_nodes[i]->key))) +			goto out_nocoalesce; + +		bch_btree_node_write(new_nodes[i], &cl); +		bch_keylist_add(&keylist, &new_nodes[i]->key);  	} -	btree_node_free(r->b, op); -	up_write(&r->b->lock); +	for (i = 0; i < nodes; i++) +		mutex_unlock(&new_nodes[i]->write_lock); -	trace_bcache_btree_gc_coalesce(nodes); +	closure_sync(&cl); + +	/* We emptied out this node */ +	BUG_ON(btree_bset_first(new_nodes[0])->keys); +	btree_node_free(new_nodes[0]); +	rw_unlock(true, new_nodes[0]); +	for (i = 0; i < nodes; i++) { +		if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key))) +			goto out_nocoalesce; + +		make_btree_freeing_key(r[i].b, keylist.top); +		bch_keylist_push(&keylist); +	} + +	bch_btree_insert_node(b, op, &keylist, NULL, NULL); +	BUG_ON(!bch_keylist_empty(&keylist)); + +	for (i = 0; i < nodes; i++) { +		btree_node_free(r[i].b); +		rw_unlock(true, r[i].b); + +		r[i].b = new_nodes[i]; +	} + +	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1)); +	r[nodes - 1].b = ERR_PTR(-EINTR); + +	trace_bcache_btree_gc_coalesce(nodes);  	gc->nodes--; -	nodes--; -	memmove(&r[0], &r[1], sizeof(struct gc_merge_info) * nodes); -	memset(&r[nodes], 0, sizeof(struct gc_merge_info)); +	bch_keylist_free(&keylist); + +	/* Invalidated our iterator */ +	return -EINTR; + +out_nocoalesce: +	closure_sync(&cl); +	bch_keylist_free(&keylist); + +	while ((k = bch_keylist_pop(&keylist))) +		if (!bkey_cmp(k, &ZERO_KEY)) +			atomic_dec(&b->c->prio_blocked); + +	for (i = 0; i < nodes; i++) +		if (!IS_ERR_OR_NULL(new_nodes[i])) { +			btree_node_free(new_nodes[i]); +			rw_unlock(true, new_nodes[i]); +		} +	return 0;  } -static int btree_gc_recurse(struct btree *b, struct btree_op *op, -			    struct closure *writes, struct gc_stat *gc) +static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, +				 struct btree *replace)  { -	void write(struct btree *r) -	{ -		if (!r->written) 
-			bch_btree_node_write(r, &op->cl); -		else if (btree_node_dirty(r)) -			bch_btree_node_write(r, writes); +	struct keylist keys; +	struct btree *n; + +	if (btree_check_reserve(b, NULL)) +		return 0; -		up_write(&r->lock); +	n = btree_node_alloc_replacement(replace, NULL); + +	/* recheck reserve after allocating replacement node */ +	if (btree_check_reserve(b, NULL)) { +		btree_node_free(n); +		rw_unlock(true, n); +		return 0;  	} -	int ret = 0, stale; -	unsigned i; -	struct gc_merge_info r[GC_MERGE_NODES]; +	bch_btree_node_write_sync(n); -	memset(r, 0, sizeof(r)); +	bch_keylist_init(&keys); +	bch_keylist_add(&keys, &n->key); -	while ((r->k = bch_next_recurse_key(b, &b->c->gc_done))) { -		r->b = bch_btree_node_get(b->c, r->k, b->level - 1, op); +	make_btree_freeing_key(replace, keys.top); +	bch_keylist_push(&keys); -		if (IS_ERR(r->b)) { -			ret = PTR_ERR(r->b); -			break; -		} +	bch_btree_insert_node(b, op, &keys, NULL, NULL); +	BUG_ON(!bch_keylist_empty(&keys)); -		r->keys	= 0; -		stale = btree_gc_mark_node(r->b, &r->keys, gc); +	btree_node_free(replace); +	rw_unlock(true, n); -		if (!b->written && -		    (r->b->level || stale > 10 || -		     b->c->gc_always_rewrite)) -			r->b = btree_gc_alloc(r->b, r->k, op); +	/* Invalidated our iterator */ +	return -EINTR; +} + +static unsigned btree_gc_count_keys(struct btree *b) +{ +	struct bkey *k; +	struct btree_iter iter; +	unsigned ret = 0; -		if (r->b->level) -			ret = btree_gc_recurse(r->b, op, writes, gc); +	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) +		ret += bkey_u64s(k); -		if (ret) { -			write(r->b); -			break; +	return ret; +} + +static int btree_gc_recurse(struct btree *b, struct btree_op *op, +			    struct closure *writes, struct gc_stat *gc) +{ +	int ret = 0; +	bool should_rewrite; +	struct bkey *k; +	struct btree_iter iter; +	struct gc_merge_info r[GC_MERGE_NODES]; +	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1; + +	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); + +	for (i = r; i < r + ARRAY_SIZE(r); i++) +		i->b = ERR_PTR(-EINTR); + +	while (1) { +		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); +		if (k) { +			r->b = bch_btree_node_get(b->c, op, k, b->level - 1, +						  true); +			if (IS_ERR(r->b)) { +				ret = PTR_ERR(r->b); +				break; +			} + +			r->keys = btree_gc_count_keys(r->b); + +			ret = btree_gc_coalesce(b, op, gc, r); +			if (ret) +				break;  		} -		bkey_copy_key(&b->c->gc_done, r->k); +		if (!last->b) +			break; -		if (!b->written) -			btree_gc_coalesce(b, op, gc, r); +		if (!IS_ERR(last->b)) { +			should_rewrite = btree_gc_mark_node(last->b, gc); +			if (should_rewrite) { +				ret = btree_gc_rewrite_node(b, op, last->b); +				if (ret) +					break; +			} -		if (r[GC_MERGE_NODES - 1].b) -			write(r[GC_MERGE_NODES - 1].b); +			if (last->b->level) { +				ret = btree_gc_recurse(last->b, op, writes, gc); +				if (ret) +					break; +			} -		memmove(&r[1], &r[0], -			sizeof(struct gc_merge_info) * (GC_MERGE_NODES - 1)); +			bkey_copy_key(&b->c->gc_done, &last->b->key); + +			/* +			 * Must flush leaf nodes before gc ends, since replace +			 * operations aren't journalled +			 */ +			mutex_lock(&last->b->write_lock); +			if (btree_node_dirty(last->b)) +				bch_btree_node_write(last->b, writes); +			mutex_unlock(&last->b->write_lock); +			rw_unlock(true, last->b); +		} + +		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1)); +		r->b = NULL; -		/* When we've got incremental GC working, we'll want to do -		 * if (should_resched()) -		 *	return -EAGAIN; -		 */ -		cond_resched(); 
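
The rewritten btree_gc_recurse() above checkpoints its position in c->gc_done and, once the #if 0 guards just below are dropped, really does bail out with -EAGAIN when it should yield; bch_btree_gc() further down simply re-invokes the walk until it reports completion. A self-contained userspace sketch of that resumable-walk shape, all names invented:

#include <errno.h>
#include <stdio.h>

struct gc_cursor {
	unsigned next;		/* checkpoint: first key not yet processed */
	unsigned end;
};

/*
 * One bounded chunk of work.  Returns 0 when the whole range is done,
 * or -EAGAIN after recording progress so the caller can yield and
 * call again.  Stand-in for the btree recursion, nothing more.
 */
static int gc_walk_some(struct gc_cursor *cur)
{
	unsigned budget = 16;

	while (cur->next < cur->end) {
		cur->next++;			/* "process" one key */
		if (!--budget)
			return -EAGAIN;		/* time to yield */
	}
	return 0;
}

int main(void)
{
	struct gc_cursor cur = { .next = 0, .end = 100 };
	int ret;

	do {
		ret = gc_walk_some(&cur);	/* a real caller yields the CPU here */
	} while (ret == -EAGAIN);

	printf("walked to %u, ret %d\n", cur.next, ret);
	return 0;
}
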
-#if 0  		if (need_resched()) {  			ret = -EAGAIN;  			break;  		} -#endif  	} -	for (i = 1; i < GC_MERGE_NODES && r[i].b; i++) -		write(r[i].b); - -	/* Might have freed some children, must remove their keys */ -	if (!b->written) -		bch_btree_sort(b); +	for (i = r; i < r + ARRAY_SIZE(r); i++) +		if (!IS_ERR_OR_NULL(i->b)) { +			mutex_lock(&i->b->write_lock); +			if (btree_node_dirty(i->b)) +				bch_btree_node_write(i->b, writes); +			mutex_unlock(&i->b->write_lock); +			rw_unlock(true, i->b); +		}  	return ret;  } @@ -1376,29 +1584,34 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,  			     struct closure *writes, struct gc_stat *gc)  {  	struct btree *n = NULL; -	unsigned keys = 0; -	int ret = 0, stale = btree_gc_mark_node(b, &keys, gc); +	int ret = 0; +	bool should_rewrite; -	if (b->level || stale > 10) +	should_rewrite = btree_gc_mark_node(b, gc); +	if (should_rewrite) {  		n = btree_node_alloc_replacement(b, NULL); -	if (!IS_ERR_OR_NULL(n)) -		swap(b, n); +		if (!IS_ERR_OR_NULL(n)) { +			bch_btree_node_write_sync(n); -	if (b->level) -		ret = btree_gc_recurse(b, op, writes, gc); +			bch_btree_set_root(n); +			btree_node_free(b); +			rw_unlock(true, n); -	if (!b->written || btree_node_dirty(b)) { -		bch_btree_node_write(b, n ? &op->cl : NULL); +			return -EINTR; +		}  	} -	if (!IS_ERR_OR_NULL(n)) { -		closure_sync(&op->cl); -		bch_btree_set_root(b); -		btree_node_free(n, op); -		rw_unlock(true, b); +	__bch_btree_mark_key(b->c, b->level + 1, &b->key); + +	if (b->level) { +		ret = btree_gc_recurse(b, op, writes, gc); +		if (ret) +			return ret;  	} +	bkey_copy_key(&b->c->gc_done, &b->key); +  	return ret;  } @@ -1418,9 +1631,9 @@ static void btree_gc_start(struct cache_set *c)  	for_each_cache(ca, c, i)  		for_each_bucket(b, ca) { -			b->gc_gen = b->gen; +			b->last_gc = b->gen;  			if (!atomic_read(&b->pin)) { -				SET_GC_MARK(b, GC_MARK_RECLAIMABLE); +				SET_GC_MARK(b, 0);  				SET_GC_SECTORS_USED(b, 0);  			}  		} @@ -1428,7 +1641,7 @@ static void btree_gc_start(struct cache_set *c)  	mutex_unlock(&c->bucket_lock);  } -size_t bch_btree_gc_finish(struct cache_set *c) +static size_t bch_btree_gc_finish(struct cache_set *c)  {  	size_t available = 0;  	struct bucket *b; @@ -1441,15 +1654,32 @@ size_t bch_btree_gc_finish(struct cache_set *c)  	c->gc_mark_valid = 1;  	c->need_gc	= 0; -	if (c->root) -		for (i = 0; i < KEY_PTRS(&c->root->key); i++) -			SET_GC_MARK(PTR_BUCKET(c, &c->root->key, i), -				    GC_MARK_METADATA); -  	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)  		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),  			    GC_MARK_METADATA); +	/* don't reclaim buckets to which writeback keys point */ +	rcu_read_lock(); +	for (i = 0; i < c->nr_uuids; i++) { +		struct bcache_device *d = c->devices[i]; +		struct cached_dev *dc; +		struct keybuf_key *w, *n; +		unsigned j; + +		if (!d || UUID_FLASH_ONLY(&c->uuids[i])) +			continue; +		dc = container_of(d, struct cached_dev, disk); + +		spin_lock(&dc->writeback_keys.lock); +		rbtree_postorder_for_each_entry_safe(w, n, +					&dc->writeback_keys.keys, node) +			for (j = 0; j < KEY_PTRS(&w->key); j++) +				SET_GC_MARK(PTR_BUCKET(c, &w->key, j), +					    GC_MARK_DIRTY); +		spin_unlock(&dc->writeback_keys.lock); +	} +	rcu_read_unlock(); +  	for_each_cache(ca, c, i) {  		uint64_t *i; @@ -1463,15 +1693,15 @@ size_t bch_btree_gc_finish(struct cache_set *c)  			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);  		for_each_bucket(b, ca) { -			b->last_gc	= b->gc_gen;  			c->need_gc	= max(c->need_gc, bucket_gc_gen(b)); -			if 
(!atomic_read(&b->pin) && -			    GC_MARK(b) == GC_MARK_RECLAIMABLE) { +			if (atomic_read(&b->pin)) +				continue; + +			BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b)); + +			if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)  				available++; -				if (!GC_SECTORS_USED(b)) -					bch_bucket_add_unused(ca, b); -			}  		}  	} @@ -1479,9 +1709,8 @@ size_t bch_btree_gc_finish(struct cache_set *c)  	return available;  } -static void bch_btree_gc(struct closure *cl) +static void bch_btree_gc(struct cache_set *c)  { -	struct cache_set *c = container_of(cl, struct cache_set, gc.cl);  	int ret;  	unsigned long available;  	struct gc_stat stats; @@ -1493,632 +1722,505 @@ static void bch_btree_gc(struct closure *cl)  	memset(&stats, 0, sizeof(struct gc_stat));  	closure_init_stack(&writes); -	bch_btree_op_init_stack(&op); -	op.lock = SHRT_MAX; +	bch_btree_op_init(&op, SHRT_MAX);  	btree_gc_start(c); -	atomic_inc(&c->prio_blocked); - -	ret = btree_root(gc_root, c, &op, &writes, &stats); -	closure_sync(&op.cl); -	closure_sync(&writes); - -	if (ret) { -		pr_warn("gc failed!"); -		continue_at(cl, bch_btree_gc, bch_gc_wq); -	} +	do { +		ret = btree_root(gc_root, c, &op, &writes, &stats); +		closure_sync(&writes); -	/* Possibly wait for new UUIDs or whatever to hit disk */ -	bch_journal_meta(c, &op.cl); -	closure_sync(&op.cl); +		if (ret && ret != -EAGAIN) +			pr_warn("gc failed!"); +	} while (ret);  	available = bch_btree_gc_finish(c); - -	atomic_dec(&c->prio_blocked);  	wake_up_allocators(c);  	bch_time_stats_update(&c->btree_gc_time, start_time);  	stats.key_bytes *= sizeof(uint64_t); -	stats.dirty	<<= 9;  	stats.data	<<= 9;  	stats.in_use	= (c->nbuckets - available) * 100 / c->nbuckets;  	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));  	trace_bcache_gc_end(c); -	continue_at(cl, bch_moving_gc, bch_gc_wq); +	bch_moving_gc(c);  } -void bch_queue_gc(struct cache_set *c) -{ -	closure_trylock_call(&c->gc.cl, bch_btree_gc, bch_gc_wq, &c->cl); -} - -/* Initial partial gc */ - -static int bch_btree_check_recurse(struct btree *b, struct btree_op *op, -				   unsigned long **seen) +static int bch_gc_thread(void *arg)  { -	int ret; +	struct cache_set *c = arg; +	struct cache *ca;  	unsigned i; -	struct bkey *k; -	struct bucket *g; -	struct btree_iter iter; -	for_each_key_filter(b, k, &iter, bch_ptr_invalid) { -		for (i = 0; i < KEY_PTRS(k); i++) { -			if (!ptr_available(b->c, k, i)) -				continue; +	while (1) { +again: +		bch_btree_gc(c); -			g = PTR_BUCKET(b->c, k, i); +		set_current_state(TASK_INTERRUPTIBLE); +		if (kthread_should_stop()) +			break; -			if (!__test_and_set_bit(PTR_BUCKET_NR(b->c, k, i), -						seen[PTR_DEV(k, i)]) || -			    !ptr_stale(b->c, k, i)) { -				g->gen = PTR_GEN(k, i); +		mutex_lock(&c->bucket_lock); -				if (b->level) -					g->prio = BTREE_PRIO; -				else if (g->prio == BTREE_PRIO) -					g->prio = INITIAL_PRIO; +		for_each_cache(ca, c, i) +			if (ca->invalidate_needs_gc) { +				mutex_unlock(&c->bucket_lock); +				set_current_state(TASK_RUNNING); +				goto again;  			} -		} - -		btree_mark_key(b, k); -	} - -	if (b->level) { -		k = bch_next_recurse_key(b, &ZERO_KEY); -		while (k) { -			struct bkey *p = bch_next_recurse_key(b, k); -			if (p) -				btree_node_prefetch(b->c, p, b->level - 1); - -			ret = btree(check_recurse, k, b, op, seen); -			if (ret) -				return ret; +		mutex_unlock(&c->bucket_lock); -			k = p; -		} +		try_to_freeze(); +		schedule();  	}  	return 0;  } -int bch_btree_check(struct cache_set *c, struct btree_op *op) +int bch_gc_thread_start(struct cache_set *c)  { -	
int ret = -ENOMEM; -	unsigned i; -	unsigned long *seen[MAX_CACHES_PER_SET]; +	c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc"); +	if (IS_ERR(c->gc_thread)) +		return PTR_ERR(c->gc_thread); -	memset(seen, 0, sizeof(seen)); - -	for (i = 0; c->cache[i]; i++) { -		size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8); -		seen[i] = kmalloc(n, GFP_KERNEL); -		if (!seen[i]) -			goto err; - -		/* Disables the seen array until prio_read() uses it too */ -		memset(seen[i], 0xFF, n); -	} - -	ret = btree_root(check_recurse, c, op, seen); -err: -	for (i = 0; i < MAX_CACHES_PER_SET; i++) -		kfree(seen[i]); -	return ret; +	set_task_state(c->gc_thread, TASK_INTERRUPTIBLE); +	return 0;  } -/* Btree insertion */ - -static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert) -{ -	struct bset *i = b->sets[b->nsets].data; - -	memmove((uint64_t *) where + bkey_u64s(insert), -		where, -		(void *) end(i) - (void *) where); - -	i->keys += bkey_u64s(insert); -	bkey_copy(where, insert); -	bch_bset_fix_lookup_table(b, where); -} +/* Initial partial gc */ -static bool fix_overlapping_extents(struct btree *b, -				    struct bkey *insert, -				    struct btree_iter *iter, -				    struct btree_op *op) +static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)  { -	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors) -	{ -		if (KEY_DIRTY(k)) -			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k), -						     offset, -sectors); -	} - -	uint64_t old_offset; -	unsigned old_size, sectors_found = 0; - -	while (1) { -		struct bkey *k = bch_btree_iter_next(iter); -		if (!k || -		    bkey_cmp(&START_KEY(k), insert) >= 0) -			break; +	int ret = 0; +	struct bkey *k, *p = NULL; +	struct btree_iter iter; -		if (bkey_cmp(k, &START_KEY(insert)) <= 0) -			continue; +	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) +		bch_initial_mark_key(b->c, b->level, k); -		old_offset = KEY_START(k); -		old_size = KEY_SIZE(k); +	bch_initial_mark_key(b->c, b->level + 1, &b->key); -		/* -		 * We might overlap with 0 size extents; we can't skip these -		 * because if they're in the set we're inserting to we have to -		 * adjust them so they don't overlap with the key we're -		 * inserting. But we don't want to check them for BTREE_REPLACE -		 * operations. 
-		 */ +	if (b->level) { +		bch_btree_iter_init(&b->keys, &iter, NULL); -		if (op->type == BTREE_REPLACE && -		    KEY_SIZE(k)) { -			/* -			 * k might have been split since we inserted/found the -			 * key we're replacing -			 */ -			unsigned i; -			uint64_t offset = KEY_START(k) - -				KEY_START(&op->replace); +		do { +			k = bch_btree_iter_next_filter(&iter, &b->keys, +						       bch_ptr_bad); +			if (k) +				btree_node_prefetch(b->c, k, b->level - 1); -			/* But it must be a subset of the replace key */ -			if (KEY_START(k) < KEY_START(&op->replace) || -			    KEY_OFFSET(k) > KEY_OFFSET(&op->replace)) -				goto check_failed; +			if (p) +				ret = btree(check_recurse, p, b, op); -			/* We didn't find a key that we were supposed to */ -			if (KEY_START(k) > KEY_START(insert) + sectors_found) -				goto check_failed; +			p = k; +		} while (p && !ret); +	} -			if (KEY_PTRS(&op->replace) != KEY_PTRS(k)) -				goto check_failed; +	return ret; +} -			/* skip past gen */ -			offset <<= 8; +int bch_btree_check(struct cache_set *c) +{ +	struct btree_op op; -			BUG_ON(!KEY_PTRS(&op->replace)); +	bch_btree_op_init(&op, SHRT_MAX); -			for (i = 0; i < KEY_PTRS(&op->replace); i++) -				if (k->ptr[i] != op->replace.ptr[i] + offset) -					goto check_failed; +	return btree_root(check_recurse, c, &op); +} -			sectors_found = KEY_OFFSET(k) - KEY_START(insert); -		} +void bch_initial_gc_finish(struct cache_set *c) +{ +	struct cache *ca; +	struct bucket *b; +	unsigned i; -		if (bkey_cmp(insert, k) < 0 && -		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) { -			/* -			 * We overlapped in the middle of an existing key: that -			 * means we have to split the old key. But we have to do -			 * slightly different things depending on whether the -			 * old key has been written out yet. -			 */ +	bch_btree_gc_finish(c); -			struct bkey *top; - -			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert)); - -			if (bkey_written(b, k)) { -				/* -				 * We insert a new key to cover the top of the -				 * old key, and the old key is modified in place -				 * to represent the bottom split. -				 * -				 * It's completely arbitrary whether the new key -				 * is the top or the bottom, but it has to match -				 * up with what btree_sort_fixup() does - it -				 * doesn't check for this kind of overlap, it -				 * depends on us inserting a new key for the top -				 * here. -				 */ -				top = bch_bset_search(b, &b->sets[b->nsets], -						      insert); -				shift_keys(b, top, k); -			} else { -				BKEY_PADDED(key) temp; -				bkey_copy(&temp.key, k); -				shift_keys(b, k, &temp.key); -				top = bkey_next(k); -			} +	mutex_lock(&c->bucket_lock); -			bch_cut_front(insert, top); -			bch_cut_back(&START_KEY(insert), k); -			bch_bset_fix_invalidated_key(b, k); -			return false; -		} +	/* +	 * We need to put some unused buckets directly on the prio freelist in +	 * order to get the allocator thread started - it needs freed buckets in +	 * order to rewrite the prios and gens, and it needs to rewrite prios +	 * and gens in order to free buckets. +	 * +	 * This is only safe for buckets that have no live data in them, which +	 * there should always be some of. 
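
bch_btree_check_recurse() above walks interior nodes with a one-ahead prefetch: it fetches the next child pointer, kicks off its read with btree_node_prefetch(), and only then recurses into the previously fetched child, so the I/O overlaps the work. A userspace sketch of the same loop shape, with invented names and trivial stand-in functions:

#include <stdio.h>

struct child { int id; };

static void prefetch_child(struct child *c)	{ (void)c; /* start its read early */ }
static int visit_child(struct child *c)		{ printf("visit %d\n", c->id); return 0; }

static int walk_children(struct child *kids, int n)
{
	struct child *c, *prev = NULL;
	int i = 0, ret = 0;

	do {
		c = i < n ? &kids[i++] : NULL;
		if (c)
			prefetch_child(c);	/* overlap the next read with... */
		if (prev)
			ret = visit_child(prev);/* ...work on the previous child */
		prev = c;
	} while (prev && !ret);

	return ret;
}

int main(void)
{
	struct child kids[] = { {1}, {2}, {3} };

	return walk_children(kids, 3);
}
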
+	 */ +	for_each_cache(ca, c, i) { +		for_each_bucket(b, ca) { +			if (fifo_full(&ca->free[RESERVE_PRIO])) +				break; -		if (bkey_cmp(insert, k) < 0) { -			bch_cut_front(insert, k); -		} else { -			if (bkey_written(b, k) && -			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) { -				/* -				 * Completely overwrote, so we don't have to -				 * invalidate the binary search tree -				 */ -				bch_cut_front(k, k); -			} else { -				__bch_cut_back(&START_KEY(insert), k); -				bch_bset_fix_invalidated_key(b, k); +			if (bch_can_invalidate_bucket(ca, b) && +			    !GC_MARK(b)) { +				__bch_invalidate_one_bucket(ca, b); +				fifo_push(&ca->free[RESERVE_PRIO], +					  b - ca->buckets);  			}  		} - -		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));  	} -check_failed: -	if (op->type == BTREE_REPLACE) { -		if (!sectors_found) { -			op->insert_collision = true; -			return true; -		} else if (sectors_found < KEY_SIZE(insert)) { -			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) - -				       (KEY_SIZE(insert) - sectors_found)); -			SET_KEY_SIZE(insert, sectors_found); -		} -	} - -	return false; +	mutex_unlock(&c->bucket_lock);  } -static bool btree_insert_key(struct btree *b, struct btree_op *op, -			     struct bkey *k) +/* Btree insertion */ + +static bool btree_insert_key(struct btree *b, struct bkey *k, +			     struct bkey *replace_key)  { -	struct bset *i = b->sets[b->nsets].data; -	struct bkey *m, *prev; -	unsigned status = BTREE_INSERT_STATUS_INSERT; +	unsigned status;  	BUG_ON(bkey_cmp(k, &b->key) > 0); -	BUG_ON(b->level && !KEY_PTRS(k)); -	BUG_ON(!b->level && !KEY_OFFSET(k)); -	if (!b->level) { -		struct btree_iter iter; -		struct bkey search = KEY(KEY_INODE(k), KEY_START(k), 0); +	status = bch_btree_insert_key(&b->keys, k, replace_key); +	if (status != BTREE_INSERT_STATUS_NO_INSERT) { +		bch_check_keys(&b->keys, "%u for %s", status, +			       replace_key ? "replace" : "insert"); -		/* -		 * bset_search() returns the first key that is strictly greater -		 * than the search key - but for back merging, we want to find -		 * the first key that is greater than or equal to KEY_START(k) - -		 * unless KEY_START(k) is 0. 
-		 */ -		if (KEY_OFFSET(&search)) -			SET_KEY_OFFSET(&search, KEY_OFFSET(&search) - 1); - -		prev = NULL; -		m = bch_btree_iter_init(b, &iter, &search); - -		if (fix_overlapping_extents(b, k, &iter, op)) -			return false; - -		while (m != end(i) && -		       bkey_cmp(k, &START_KEY(m)) > 0) -			prev = m, m = bkey_next(m); - -		if (key_merging_disabled(b->c)) -			goto insert; - -		/* prev is in the tree, if we merge we're done */ -		status = BTREE_INSERT_STATUS_BACK_MERGE; -		if (prev && -		    bch_bkey_try_merge(b, prev, k)) -			goto merged; - -		status = BTREE_INSERT_STATUS_OVERWROTE; -		if (m != end(i) && -		    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m)) -			goto copy; - -		status = BTREE_INSERT_STATUS_FRONT_MERGE; -		if (m != end(i) && -		    bch_bkey_try_merge(b, k, m)) -			goto copy; +		trace_bcache_btree_insert_key(b, k, replace_key != NULL, +					      status); +		return true;  	} else -		m = bch_bset_search(b, &b->sets[b->nsets], k); - -insert:	shift_keys(b, m, k); -copy:	bkey_copy(m, k); -merged: -	if (KEY_DIRTY(k)) -		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k), -					     KEY_START(k), KEY_SIZE(k)); - -	bch_check_keys(b, "%u for %s", status, op_type(op)); - -	if (b->level && !KEY_OFFSET(k)) -		btree_current_write(b)->prio_blocked++; - -	trace_bcache_btree_insert_key(b, k, op->type, status); - -	return true; +		return false;  } -static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op) +static size_t insert_u64s_remaining(struct btree *b)  { -	bool ret = false; -	struct bkey *k; -	unsigned oldsize = bch_count_data(b); +	long ret = bch_btree_keys_u64s_remaining(&b->keys); -	while ((k = bch_keylist_pop(&op->keys))) { -		bkey_put(b->c, k, b->level); -		ret |= btree_insert_key(b, op, k); -	} +	/* +	 * Might land in the middle of an existing extent and have to split it +	 */ +	if (b->keys.ops->is_extents) +		ret -= KEY_MAX_U64S; -	BUG_ON(bch_count_data(b) < oldsize); -	return ret; +	return max(ret, 0L);  } -bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op, -				   struct bio *bio) +static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op, +				  struct keylist *insert_keys, +				  struct bkey *replace_key)  {  	bool ret = false; -	uint64_t btree_ptr = b->key.ptr[0]; -	unsigned long seq = b->seq; -	BKEY_PADDED(k) tmp; +	int oldsize = bch_count_data(&b->keys); -	rw_unlock(false, b); -	rw_lock(true, b, b->level); +	while (!bch_keylist_empty(insert_keys)) { +		struct bkey *k = insert_keys->keys; -	if (b->key.ptr[0] != btree_ptr || -	    b->seq != seq + 1 || -	    should_split(b)) -		goto out; +		if (bkey_u64s(k) > insert_u64s_remaining(b)) +			break; + +		if (bkey_cmp(k, &b->key) <= 0) { +			if (!b->level) +				bkey_put(b->c, k); + +			ret |= btree_insert_key(b, k, replace_key); +			bch_keylist_pop_front(insert_keys); +		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) { +			BKEY_PADDED(key) temp; +			bkey_copy(&temp.key, insert_keys->keys); -	op->replace = KEY(op->inode, bio_end_sector(bio), bio_sectors(bio)); +			bch_cut_back(&b->key, &temp.key); +			bch_cut_front(&b->key, insert_keys->keys); -	SET_KEY_PTRS(&op->replace, 1); -	get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t)); +			ret |= btree_insert_key(b, &temp.key, replace_key); +			break; +		} else { +			break; +		} +	} -	SET_PTR_DEV(&op->replace, 0, PTR_CHECK_DEV); +	if (!ret) +		op->insert_collision = true; -	bkey_copy(&tmp.k, &op->replace); +	BUG_ON(!bch_keylist_empty(insert_keys) && b->level); -	BUG_ON(op->type != BTREE_INSERT); -	BUG_ON(!btree_insert_key(b, op, 
&tmp.k)); -	ret = true; -out: -	downgrade_write(&b->lock); +	BUG_ON(bch_count_data(&b->keys) < oldsize);  	return ret;  } -static int btree_split(struct btree *b, struct btree_op *op) +static int btree_split(struct btree *b, struct btree_op *op, +		       struct keylist *insert_keys, +		       struct bkey *replace_key)  { -	bool split, root = b == b->c->root; +	bool split;  	struct btree *n1, *n2 = NULL, *n3 = NULL;  	uint64_t start_time = local_clock(); +	struct closure cl; +	struct keylist parent_keys; -	if (b->level) -		set_closure_blocking(&op->cl); +	closure_init_stack(&cl); +	bch_keylist_init(&parent_keys); -	n1 = btree_node_alloc_replacement(b, &op->cl); +	if (btree_check_reserve(b, op)) { +		if (!b->level) +			return -EINTR; +		else +			WARN(1, "insufficient reserve for split\n"); +	} + +	n1 = btree_node_alloc_replacement(b, op);  	if (IS_ERR(n1))  		goto err; -	split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5; +	split = set_blocks(btree_bset_first(n1), +			   block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;  	if (split) {  		unsigned keys = 0; -		trace_bcache_btree_node_split(b, n1->sets[0].data->keys); +		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); -		n2 = bch_btree_node_alloc(b->c, b->level, &op->cl); +		n2 = bch_btree_node_alloc(b->c, op, b->level);  		if (IS_ERR(n2))  			goto err_free1; -		if (root) { -			n3 = bch_btree_node_alloc(b->c, b->level + 1, &op->cl); +		if (!b->parent) { +			n3 = bch_btree_node_alloc(b->c, op, b->level + 1);  			if (IS_ERR(n3))  				goto err_free2;  		} -		bch_btree_insert_keys(n1, op); +		mutex_lock(&n1->write_lock); +		mutex_lock(&n2->write_lock); + +		bch_btree_insert_keys(n1, op, insert_keys, replace_key); -		/* Has to be a linear search because we don't have an auxiliary +		/* +		 * Has to be a linear search because we don't have an auxiliary  		 * search tree yet  		 */ -		while (keys < (n1->sets[0].data->keys * 3) / 5) -			keys += bkey_u64s(node(n1->sets[0].data, keys)); +		while (keys < (btree_bset_first(n1)->keys * 3) / 5) +			keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), +							keys)); -		bkey_copy_key(&n1->key, node(n1->sets[0].data, keys)); -		keys += bkey_u64s(node(n1->sets[0].data, keys)); +		bkey_copy_key(&n1->key, +			      bset_bkey_idx(btree_bset_first(n1), keys)); +		keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys)); -		n2->sets[0].data->keys = n1->sets[0].data->keys - keys; -		n1->sets[0].data->keys = keys; +		btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys; +		btree_bset_first(n1)->keys = keys; -		memcpy(n2->sets[0].data->start, -		       end(n1->sets[0].data), -		       n2->sets[0].data->keys * sizeof(uint64_t)); +		memcpy(btree_bset_first(n2)->start, +		       bset_bkey_last(btree_bset_first(n1)), +		       btree_bset_first(n2)->keys * sizeof(uint64_t));  		bkey_copy_key(&n2->key, &b->key); -		bch_keylist_add(&op->keys, &n2->key); -		bch_btree_node_write(n2, &op->cl); +		bch_keylist_add(&parent_keys, &n2->key); +		bch_btree_node_write(n2, &cl); +		mutex_unlock(&n2->write_lock);  		rw_unlock(true, n2);  	} else { -		trace_bcache_btree_node_compact(b, n1->sets[0].data->keys); +		trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); -		bch_btree_insert_keys(n1, op); +		mutex_lock(&n1->write_lock); +		bch_btree_insert_keys(n1, op, insert_keys, replace_key);  	} -	bch_keylist_add(&op->keys, &n1->key); -	bch_btree_node_write(n1, &op->cl); +	bch_keylist_add(&parent_keys, &n1->key); +	bch_btree_node_write(n1, &cl); +	mutex_unlock(&n1->write_lock);  	
if (n3) { +		/* Depth increases, make a new root */ +		mutex_lock(&n3->write_lock);  		bkey_copy_key(&n3->key, &MAX_KEY); -		bch_btree_insert_keys(n3, op); -		bch_btree_node_write(n3, &op->cl); +		bch_btree_insert_keys(n3, op, &parent_keys, NULL); +		bch_btree_node_write(n3, &cl); +		mutex_unlock(&n3->write_lock); -		closure_sync(&op->cl); +		closure_sync(&cl);  		bch_btree_set_root(n3);  		rw_unlock(true, n3); -	} else if (root) { -		op->keys.top = op->keys.bottom; -		closure_sync(&op->cl); +	} else if (!b->parent) { +		/* Root filled up but didn't need to be split */ +		closure_sync(&cl);  		bch_btree_set_root(n1);  	} else { -		unsigned i; - -		bkey_copy(op->keys.top, &b->key); -		bkey_copy_key(op->keys.top, &ZERO_KEY); - -		for (i = 0; i < KEY_PTRS(&b->key); i++) { -			uint8_t g = PTR_BUCKET(b->c, &b->key, i)->gen + 1; +		/* Split a non root node */ +		closure_sync(&cl); +		make_btree_freeing_key(b, parent_keys.top); +		bch_keylist_push(&parent_keys); -			SET_PTR_GEN(op->keys.top, i, g); -		} - -		bch_keylist_push(&op->keys); -		closure_sync(&op->cl); -		atomic_inc(&b->c->prio_blocked); +		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL); +		BUG_ON(!bch_keylist_empty(&parent_keys));  	} +	btree_node_free(b);  	rw_unlock(true, n1); -	btree_node_free(b, op);  	bch_time_stats_update(&b->c->btree_split_time, start_time);  	return 0;  err_free2: -	__bkey_put(n2->c, &n2->key); -	btree_node_free(n2, op); +	bkey_put(b->c, &n2->key); +	btree_node_free(n2);  	rw_unlock(true, n2);  err_free1: -	__bkey_put(n1->c, &n1->key); -	btree_node_free(n1, op); +	bkey_put(b->c, &n1->key); +	btree_node_free(n1);  	rw_unlock(true, n1);  err: +	WARN(1, "bcache: btree split failed (level %u)", b->level); +  	if (n3 == ERR_PTR(-EAGAIN) ||  	    n2 == ERR_PTR(-EAGAIN) ||  	    n1 == ERR_PTR(-EAGAIN))  		return -EAGAIN; -	pr_warn("couldn't split");  	return -ENOMEM;  } -static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op, -				    struct keylist *stack_keys) +static int bch_btree_insert_node(struct btree *b, struct btree_op *op, +				 struct keylist *insert_keys, +				 atomic_t *journal_ref, +				 struct bkey *replace_key)  { -	if (b->level) { -		int ret; -		struct bkey *insert = op->keys.bottom; -		struct bkey *k = bch_next_recurse_key(b, &START_KEY(insert)); +	struct closure cl; -		if (!k) { -			btree_bug(b, "no key to recurse on at level %i/%i", -				  b->level, b->c->root->level); +	BUG_ON(b->level && replace_key); -			op->keys.top = op->keys.bottom; -			return -EIO; -		} +	closure_init_stack(&cl); -		if (bkey_cmp(insert, k) > 0) { -			unsigned i; +	mutex_lock(&b->write_lock); -			if (op->type == BTREE_REPLACE) { -				__bkey_put(b->c, insert); -				op->keys.top = op->keys.bottom; -				op->insert_collision = true; -				return 0; -			} +	if (write_block(b) != btree_bset_last(b) && +	    b->keys.last_set_unwritten) +		bch_btree_init_next(b); /* just wrote a set */ -			for (i = 0; i < KEY_PTRS(insert); i++) -				atomic_inc(&PTR_BUCKET(b->c, insert, i)->pin); +	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) { +		mutex_unlock(&b->write_lock); +		goto split; +	} -			bkey_copy(stack_keys->top, insert); +	BUG_ON(write_block(b) != btree_bset_last(b)); -			bch_cut_back(k, insert); -			bch_cut_front(k, stack_keys->top); +	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) { +		if (!b->level) +			bch_btree_leaf_dirty(b, journal_ref); +		else +			bch_btree_node_write(b, &cl); +	} -			bch_keylist_push(stack_keys); -		} +	mutex_unlock(&b->write_lock); -		ret = 
btree(insert_recurse, k, b, op, stack_keys); -		if (ret) -			return ret; +	/* wait for btree node write if necessary, after unlock */ +	closure_sync(&cl); + +	return 0; +split: +	if (current->bio_list) { +		op->lock = b->c->root->level + 1; +		return -EAGAIN; +	} else if (op->lock <= b->c->root->level) { +		op->lock = b->c->root->level + 1; +		return -EINTR; +	} else { +		/* Invalidated all iterators */ +		int ret = btree_split(b, op, insert_keys, replace_key); + +		if (bch_keylist_empty(insert_keys)) +			return 0; +		else if (!ret) +			return -EINTR; +		return ret;  	} +} -	if (!bch_keylist_empty(&op->keys)) { -		if (should_split(b)) { -			if (op->lock <= b->c->root->level) { -				BUG_ON(b->level); -				op->lock = b->c->root->level + 1; -				return -EINTR; -			} -			return btree_split(b, op); -		} +int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, +			       struct bkey *check_key) +{ +	int ret = -EINTR; +	uint64_t btree_ptr = b->key.ptr[0]; +	unsigned long seq = b->seq; +	struct keylist insert; +	bool upgrade = op->lock == -1; -		BUG_ON(write_block(b) != b->sets[b->nsets].data); +	bch_keylist_init(&insert); -		if (bch_btree_insert_keys(b, op)) { -			if (!b->level) -				bch_btree_leaf_dirty(b, op); -			else -				bch_btree_node_write(b, &op->cl); -		} +	if (upgrade) { +		rw_unlock(false, b); +		rw_lock(true, b, b->level); + +		if (b->key.ptr[0] != btree_ptr || +		    b->seq != seq + 1) +			goto out;  	} -	return 0; +	SET_KEY_PTRS(check_key, 1); +	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t)); + +	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV); + +	bch_keylist_add(&insert, check_key); + +	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL); + +	BUG_ON(!ret && !bch_keylist_empty(&insert)); +out: +	if (upgrade) +		downgrade_write(&b->lock); +	return ret;  } -int bch_btree_insert(struct btree_op *op, struct cache_set *c) +struct btree_insert_op { +	struct btree_op	op; +	struct keylist	*keys; +	atomic_t	*journal_ref; +	struct bkey	*replace_key; +}; + +static int btree_insert_fn(struct btree_op *b_op, struct btree *b)  { -	int ret = 0; -	struct keylist stack_keys; +	struct btree_insert_op *op = container_of(b_op, +					struct btree_insert_op, op); -	/* -	 * Don't want to block with the btree locked unless we have to, -	 * otherwise we get deadlocks with try_harder and between split/gc -	 */ -	clear_closure_blocking(&op->cl); - -	BUG_ON(bch_keylist_empty(&op->keys)); -	bch_keylist_copy(&stack_keys, &op->keys); -	bch_keylist_init(&op->keys); - -	while (!bch_keylist_empty(&stack_keys) || -	       !bch_keylist_empty(&op->keys)) { -		if (bch_keylist_empty(&op->keys)) { -			bch_keylist_add(&op->keys, -					bch_keylist_pop(&stack_keys)); -			op->lock = 0; -		} +	int ret = bch_btree_insert_node(b, &op->op, op->keys, +					op->journal_ref, op->replace_key); +	if (ret && !bch_keylist_empty(op->keys)) +		return ret; +	else +		return MAP_DONE; +} -		ret = btree_root(insert_recurse, c, op, &stack_keys); +int bch_btree_insert(struct cache_set *c, struct keylist *keys, +		     atomic_t *journal_ref, struct bkey *replace_key) +{ +	struct btree_insert_op op; +	int ret = 0; -		if (ret == -EAGAIN) { -			ret = 0; -			closure_sync(&op->cl); -		} else if (ret) { -			struct bkey *k; +	BUG_ON(current->bio_list); +	BUG_ON(bch_keylist_empty(keys)); + +	bch_btree_op_init(&op.op, 0); +	op.keys		= keys; +	op.journal_ref	= journal_ref; +	op.replace_key	= replace_key; + +	while (!ret && !bch_keylist_empty(keys)) { +		op.op.lock = 0; +		ret = bch_btree_map_leaf_nodes(&op.op, c, +					       
&START_KEY(keys->keys), +					       btree_insert_fn); +	} -			pr_err("error %i trying to insert key for %s", -			       ret, op_type(op)); +	if (ret) { +		struct bkey *k; -			while ((k = bch_keylist_pop(&stack_keys) ?: -				    bch_keylist_pop(&op->keys))) -				bkey_put(c, k, 0); -		} -	} +		pr_err("error %i", ret); -	bch_keylist_free(&stack_keys); +		while ((k = bch_keylist_pop(keys))) +			bkey_put(c, k); +	} else if (op.op.insert_collision) +		ret = -ESRCH; -	if (op->journal) -		atomic_dec_bug(op->journal); -	op->journal = NULL;  	return ret;  } @@ -2141,132 +2243,81 @@ void bch_btree_set_root(struct btree *b)  	mutex_unlock(&b->c->bucket_lock);  	b->c->root = b; -	__bkey_put(b->c, &b->key);  	bch_journal_meta(b->c, &cl);  	closure_sync(&cl);  } -/* Cache lookup */ +/* Map across nodes or keys */ -static int submit_partial_cache_miss(struct btree *b, struct btree_op *op, -				     struct bkey *k) +static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, +				       struct bkey *from, +				       btree_map_nodes_fn *fn, int flags)  { -	struct search *s = container_of(op, struct search, op); -	struct bio *bio = &s->bio.bio; -	int ret = 0; +	int ret = MAP_CONTINUE; + +	if (b->level) { +		struct bkey *k; +		struct btree_iter iter; -	while (!ret && -	       !op->lookup_done) { -		unsigned sectors = INT_MAX; +		bch_btree_iter_init(&b->keys, &iter, from); -		if (KEY_INODE(k) == op->inode) { -			if (KEY_START(k) <= bio->bi_sector) -				break; +		while ((k = bch_btree_iter_next_filter(&iter, &b->keys, +						       bch_ptr_bad))) { +			ret = btree(map_nodes_recurse, k, b, +				    op, from, fn, flags); +			from = NULL; -			sectors = min_t(uint64_t, sectors, -					KEY_START(k) - bio->bi_sector); +			if (ret != MAP_CONTINUE) +				return ret;  		} - -		ret = s->d->cache_miss(b, s, bio, sectors);  	} +	if (!b->level || flags == MAP_ALL_NODES) +		ret = fn(op, b); +  	return ret;  } -/* - * Read from a single key, handling the initial cache miss if the key starts in - * the middle of the bio - */ -static int submit_partial_cache_hit(struct btree *b, struct btree_op *op, -				    struct bkey *k) +int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, +			  struct bkey *from, btree_map_nodes_fn *fn, int flags)  { -	struct search *s = container_of(op, struct search, op); -	struct bio *bio = &s->bio.bio; -	unsigned ptr; -	struct bio *n; - -	int ret = submit_partial_cache_miss(b, op, k); -	if (ret || op->lookup_done) -		return ret; - -	/* XXX: figure out best pointer - for multiple cache devices */ -	ptr = 0; - -	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO; - -	while (!op->lookup_done && -	       KEY_INODE(k) == op->inode && -	       bio->bi_sector < KEY_OFFSET(k)) { -		struct bkey *bio_key; -		sector_t sector = PTR_OFFSET(k, ptr) + -			(bio->bi_sector - KEY_START(k)); -		unsigned sectors = min_t(uint64_t, INT_MAX, -					 KEY_OFFSET(k) - bio->bi_sector); - -		n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); -		if (n == bio) -			op->lookup_done = true; - -		bio_key = &container_of(n, struct bbio, bio)->key; - -		/* -		 * The bucket we're reading from might be reused while our bio -		 * is in flight, and we could then end up reading the wrong -		 * data. -		 * -		 * We guard against this by checking (in cache_read_endio()) if -		 * the pointer is stale again; if so, we treat it as an error -		 * and reread from the backing device (but we don't pass that -		 * error up anywhere). 
-		 */ - -		bch_bkey_copy_single_ptr(bio_key, k, ptr); -		SET_PTR_OFFSET(bio_key, 0, sector); - -		n->bi_end_io	= bch_cache_read_endio; -		n->bi_private	= &s->cl; - -		__bch_submit_bbio(n, b->c); -	} - -	return 0; +	return btree_root(map_nodes_recurse, c, op, from, fn, flags);  } -int bch_btree_search_recurse(struct btree *b, struct btree_op *op) +static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, +				      struct bkey *from, btree_map_keys_fn *fn, +				      int flags)  { -	struct search *s = container_of(op, struct search, op); -	struct bio *bio = &s->bio.bio; - -	int ret = 0; +	int ret = MAP_CONTINUE;  	struct bkey *k;  	struct btree_iter iter; -	bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0)); -	do { -		k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad); -		if (!k) { -			/* -			 * b->key would be exactly what we want, except that -			 * pointers to btree nodes have nonzero size - we -			 * wouldn't go far enough -			 */ +	bch_btree_iter_init(&b->keys, &iter, from); -			ret = submit_partial_cache_miss(b, op, -					&KEY(KEY_INODE(&b->key), -					     KEY_OFFSET(&b->key), 0)); -			break; -		} +	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { +		ret = !b->level +			? fn(op, b, k) +			: btree(map_keys_recurse, k, b, op, from, fn, flags); +		from = NULL; -		ret = b->level -			? btree(search_recurse, k, b, op) -			: submit_partial_cache_hit(b, op, k); -	} while (!ret && -		 !op->lookup_done); +		if (ret != MAP_CONTINUE) +			return ret; +	} + +	if (!b->level && (flags & MAP_END_KEY)) +		ret = fn(op, b, &KEY(KEY_INODE(&b->key), +				     KEY_OFFSET(&b->key), 0));  	return ret;  } +int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, +		       struct bkey *from, btree_map_keys_fn *fn, int flags) +{ +	return btree_root(map_keys_recurse, c, op, from, fn, flags); +} +  /* Keybuf code */  static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r) @@ -2285,80 +2336,79 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,  	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);  } -static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op, -				   struct keybuf *buf, struct bkey *end, -				   keybuf_pred_fn *pred) -{ -	struct btree_iter iter; -	bch_btree_iter_init(b, &iter, &buf->last_scanned); - -	while (!array_freelist_empty(&buf->freelist)) { -		struct bkey *k = bch_btree_iter_next_filter(&iter, b, -							    bch_ptr_bad); - -		if (!b->level) { -			if (!k) { -				buf->last_scanned = b->key; -				break; -			} +struct refill { +	struct btree_op	op; +	unsigned	nr_found; +	struct keybuf	*buf; +	struct bkey	*end; +	keybuf_pred_fn	*pred; +}; -			buf->last_scanned = *k; -			if (bkey_cmp(&buf->last_scanned, end) >= 0) -				break; +static int refill_keybuf_fn(struct btree_op *op, struct btree *b, +			    struct bkey *k) +{ +	struct refill *refill = container_of(op, struct refill, op); +	struct keybuf *buf = refill->buf; +	int ret = MAP_CONTINUE; -			if (pred(buf, k)) { -				struct keybuf_key *w; +	if (bkey_cmp(k, refill->end) >= 0) { +		ret = MAP_DONE; +		goto out; +	} -				spin_lock(&buf->lock); +	if (!KEY_SIZE(k)) /* end key */ +		goto out; -				w = array_alloc(&buf->freelist); +	if (refill->pred(buf, k)) { +		struct keybuf_key *w; -				w->private = NULL; -				bkey_copy(&w->key, k); +		spin_lock(&buf->lock); -				if (RB_INSERT(&buf->keys, w, node, keybuf_cmp)) -					array_free(&buf->freelist, w); +		w = array_alloc(&buf->freelist); +		if (!w) { +			spin_unlock(&buf->lock); +	
		return MAP_DONE; +		} -				spin_unlock(&buf->lock); -			} -		} else { -			if (!k) -				break; +		w->private = NULL; +		bkey_copy(&w->key, k); -			btree(refill_keybuf, k, b, op, buf, end, pred); -			/* -			 * Might get an error here, but can't really do anything -			 * and it'll get logged elsewhere. Just read what we -			 * can. -			 */ +		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp)) +			array_free(&buf->freelist, w); +		else +			refill->nr_found++; -			if (bkey_cmp(&buf->last_scanned, end) >= 0) -				break; +		if (array_freelist_empty(&buf->freelist)) +			ret = MAP_DONE; -			cond_resched(); -		} +		spin_unlock(&buf->lock);  	} - -	return 0; +out: +	buf->last_scanned = *k; +	return ret;  }  void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,  		       struct bkey *end, keybuf_pred_fn *pred)  {  	struct bkey start = buf->last_scanned; -	struct btree_op op; -	bch_btree_op_init_stack(&op); +	struct refill refill;  	cond_resched(); -	btree_root(refill_keybuf, c, &op, buf, end, pred); -	closure_sync(&op.cl); +	bch_btree_op_init(&refill.op, -1); +	refill.nr_found	= 0; +	refill.buf	= buf; +	refill.end	= end; +	refill.pred	= pred; + +	bch_btree_map_keys(&refill.op, c, &buf->last_scanned, +			   refill_keybuf_fn, MAP_END_KEY); -	pr_debug("found %s keys from %llu:%llu to %llu:%llu", -		 RB_EMPTY_ROOT(&buf->keys) ? "no" : -		 array_freelist_empty(&buf->freelist) ? "some" : "a few", -		 KEY_INODE(&start), KEY_OFFSET(&start), -		 KEY_INODE(&buf->last_scanned), KEY_OFFSET(&buf->last_scanned)); +	trace_bcache_keyscan(refill.nr_found, +			     KEY_INODE(&start), KEY_OFFSET(&start), +			     KEY_INODE(&buf->last_scanned), +			     KEY_OFFSET(&buf->last_scanned));  	spin_lock(&buf->lock); @@ -2436,9 +2486,9 @@ struct keybuf_key *bch_keybuf_next(struct keybuf *buf)  }  struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, -					     struct keybuf *buf, -					     struct bkey *end, -					     keybuf_pred_fn *pred) +					  struct keybuf *buf, +					  struct bkey *end, +					  keybuf_pred_fn *pred)  {  	struct keybuf_key *ret; @@ -2466,20 +2516,3 @@ void bch_keybuf_init(struct keybuf *buf)  	spin_lock_init(&buf->lock);  	array_allocator_init(&buf->freelist);  } - -void bch_btree_exit(void) -{ -	if (btree_io_wq) -		destroy_workqueue(btree_io_wq); -	if (bch_gc_wq) -		destroy_workqueue(bch_gc_wq); -} - -int __init bch_btree_init(void) -{ -	if (!(bch_gc_wq = create_singlethread_workqueue("bch_btree_gc")) || -	    !(btree_io_wq = create_singlethread_workqueue("bch_btree_io"))) -		return -ENOMEM; - -	return 0; -} diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index 3333d372363..91dfa5e6968 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -125,24 +125,19 @@ struct btree {  	unsigned long		seq;  	struct rw_semaphore	lock;  	struct cache_set	*c; +	struct btree		*parent; + +	struct mutex		write_lock;  	unsigned long		flags;  	uint16_t		written;	/* would be nice to kill */  	uint8_t			level; -	uint8_t			nsets; -	uint8_t			page_order; - -	/* -	 * Set of sorted keys - the real btree node - plus a binary search tree -	 * -	 * sets[0] is special; set[0]->tree, set[0]->prev and set[0]->data point -	 * to the memory we have allocated for this btree node. Additionally, -	 * set[0]->data points to the entire btree node as it exists on disk. 
-	 */ -	struct bset_tree	sets[MAX_BSETS]; + +	struct btree_keys	keys;  	/* For outstanding btree writes, used as a lock - protects write_idx */ -	struct closure_with_waitlist	io; +	struct closure		io; +	struct semaphore	io_mutex;  	struct list_head	list;  	struct delayed_work	work; @@ -178,42 +173,27 @@ static inline struct btree_write *btree_prev_write(struct btree *b)  	return b->writes + (btree_node_write_idx(b) ^ 1);  } -static inline unsigned bset_offset(struct btree *b, struct bset *i) +static inline struct bset *btree_bset_first(struct btree *b)  { -	return (((size_t) i) - ((size_t) b->sets->data)) >> 9; +	return b->keys.set->data;  } -static inline struct bset *write_block(struct btree *b) +static inline struct bset *btree_bset_last(struct btree *b)  { -	return ((void *) b->sets[0].data) + b->written * block_bytes(b->c); +	return bset_tree_last(&b->keys)->data;  } -static inline bool bset_written(struct btree *b, struct bset_tree *t) +static inline unsigned bset_block_offset(struct btree *b, struct bset *i)  { -	return t->data < write_block(b); -} - -static inline bool bkey_written(struct btree *b, struct bkey *k) -{ -	return k < write_block(b)->start; +	return bset_sector_offset(&b->keys, i) >> b->c->block_bits;  }  static inline void set_gc_sectors(struct cache_set *c)  { -	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 8); +	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);  } -static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k) -{ -	return __bch_ptr_invalid(b->c, b->level, k); -} - -static inline struct bkey *bch_btree_iter_init(struct btree *b, -					       struct btree_iter *iter, -					       struct bkey *search) -{ -	return __bch_btree_iter_init(b, iter, search, b->sets); -} +void bkey_put(struct cache_set *c, struct bkey *k);  /* Looping macros */ @@ -223,62 +203,24 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b,  	     iter++)							\  		hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash) -#define for_each_key_filter(b, k, iter, filter)				\ -	for (bch_btree_iter_init((b), (iter), NULL);			\ -	     ((k) = bch_btree_iter_next_filter((iter), b, filter));) - -#define for_each_key(b, k, iter)					\ -	for (bch_btree_iter_init((b), (iter), NULL);			\ -	     ((k) = bch_btree_iter_next(iter));) -  /* Recursing down the btree */  struct btree_op { -	struct closure		cl; -	struct cache_set	*c; - -	/* Journal entry we have a refcount on */ -	atomic_t		*journal; - -	/* Bio to be inserted into the cache */ -	struct bio		*cache_bio; - -	unsigned		inode; - -	uint16_t		write_prio; +	/* for waiting on btree reserve in btree_split() */ +	wait_queue_t		wait;  	/* Btree level at which we start taking write locks */  	short			lock; -	/* Btree insertion type */ -	enum { -		BTREE_INSERT, -		BTREE_REPLACE -	} type:8; - -	unsigned		csum:1; -	unsigned		skip:1; -	unsigned		flush_journal:1; - -	unsigned		insert_data_done:1; -	unsigned		lookup_done:1;  	unsigned		insert_collision:1; - -	/* Anything after this point won't get zeroed in do_bio_hook() */ - -	/* Keys to be inserted */ -	struct keylist		keys; -	BKEY_PADDED(replace); -}; - -enum { -	BTREE_INSERT_STATUS_INSERT, -	BTREE_INSERT_STATUS_BACK_MERGE, -	BTREE_INSERT_STATUS_OVERWROTE, -	BTREE_INSERT_STATUS_FRONT_MERGE,  }; -void bch_btree_op_init_stack(struct btree_op *); +static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level) +{ +	memset(op, 0, sizeof(struct btree_op)); +	init_wait(&op->wait); +	op->lock = write_lock_level; +}  static 
inline void rw_lock(bool w, struct btree *b, int level)  { @@ -290,108 +232,73 @@ static inline void rw_lock(bool w, struct btree *b, int level)  static inline void rw_unlock(bool w, struct btree *b)  { -#ifdef CONFIG_BCACHE_EDEBUG -	unsigned i; - -	if (w && b->key.ptr[0]) -		for (i = 0; i <= b->nsets; i++) -			bch_check_key_order(b, b->sets[i].data); -#endif -  	if (w)  		b->seq++;  	(w ? up_write : up_read)(&b->lock);  } -#define insert_lock(s, b)	((b)->level <= (s)->lock) - -/* - * These macros are for recursing down the btree - they handle the details of - * locking and looking up nodes in the cache for you. They're best treated as - * mere syntax when reading code that uses them. - * - * op->lock determines whether we take a read or a write lock at a given depth. - * If you've got a read lock and find that you need a write lock (i.e. you're - * going to have to split), set op->lock and return -EINTR; btree_root() will - * call you again and you'll have the correct lock. - */ - -/** - * btree - recurse down the btree on a specified key - * @fn:		function to call, which will be passed the child node - * @key:	key to recurse on - * @b:		parent btree node - * @op:		pointer to struct btree_op - */ -#define btree(fn, key, b, op, ...)					\ -({									\ -	int _r, l = (b)->level - 1;					\ -	bool _w = l <= (op)->lock;					\ -	struct btree *_b = bch_btree_node_get((b)->c, key, l, op);	\ -	if (!IS_ERR(_b)) {						\ -		_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);		\ -		rw_unlock(_w, _b);					\ -	} else								\ -		_r = PTR_ERR(_b);					\ -	_r;								\ -}) - -/** - * btree_root - call a function on the root of the btree - * @fn:		function to call, which will be passed the child node - * @c:		cache set - * @op:		pointer to struct btree_op - */ -#define btree_root(fn, c, op, ...)					
\ -({									\ -	int _r = -EINTR;						\ -	do {								\ -		struct btree *_b = (c)->root;				\ -		bool _w = insert_lock(op, _b);				\ -		rw_lock(_w, _b, _b->level);				\ -		if (_b == (c)->root &&					\ -		    _w == insert_lock(op, _b))				\ -			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\ -		rw_unlock(_w, _b);					\ -		bch_cannibalize_unlock(c, &(op)->cl);		\ -	} while (_r == -EINTR);						\ -									\ -	_r;								\ -}) +void bch_btree_node_read_done(struct btree *); +void __bch_btree_node_write(struct btree *, struct closure *); +void bch_btree_node_write(struct btree *, struct closure *); -static inline bool should_split(struct btree *b) +void bch_btree_set_root(struct btree *); +struct btree *bch_btree_node_alloc(struct cache_set *, struct btree_op *, int); +struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *, +				 struct bkey *, int, bool); + +int bch_btree_insert_check_key(struct btree *, struct btree_op *, +			       struct bkey *); +int bch_btree_insert(struct cache_set *, struct keylist *, +		     atomic_t *, struct bkey *); + +int bch_gc_thread_start(struct cache_set *); +void bch_initial_gc_finish(struct cache_set *); +void bch_moving_gc(struct cache_set *); +int bch_btree_check(struct cache_set *); +void bch_initial_mark_key(struct cache_set *, int, struct bkey *); + +static inline void wake_up_gc(struct cache_set *c)  { -	struct bset *i = write_block(b); -	return b->written >= btree_blocks(b) || -		(i->seq == b->sets[0].data->seq && -		 b->written + __set_blocks(i, i->keys + 15, b->c) -		 > btree_blocks(b)); +	if (c->gc_thread) +		wake_up_process(c->gc_thread);  } -void bch_btree_node_read(struct btree *); -void bch_btree_node_write(struct btree *, struct closure *); +#define MAP_DONE	0 +#define MAP_CONTINUE	1 -void bch_cannibalize_unlock(struct cache_set *, struct closure *); -void bch_btree_set_root(struct btree *); -struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *); -struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, -				int, struct btree_op *); +#define MAP_ALL_NODES	0 +#define MAP_LEAF_NODES	1 + +#define MAP_END_KEY	1 -bool bch_btree_insert_check_key(struct btree *, struct btree_op *, -				   struct bio *); -int bch_btree_insert(struct btree_op *, struct cache_set *); +typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *); +int __bch_btree_map_nodes(struct btree_op *, struct cache_set *, +			  struct bkey *, btree_map_nodes_fn *, int); + +static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, +				      struct bkey *from, btree_map_nodes_fn *fn) +{ +	return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES); +} + +static inline int bch_btree_map_leaf_nodes(struct btree_op *op, +					   struct cache_set *c, +					   struct bkey *from, +					   btree_map_nodes_fn *fn) +{ +	return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES); +} -int bch_btree_search_recurse(struct btree *, struct btree_op *); +typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *, +				struct bkey *); +int bch_btree_map_keys(struct btree_op *, struct cache_set *, +		       struct bkey *, btree_map_keys_fn *, int); -void bch_queue_gc(struct cache_set *); -size_t bch_btree_gc_finish(struct cache_set *); -void bch_moving_gc(struct closure *); -int bch_btree_check(struct cache_set *, struct btree_op *); -uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *); +typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);  void bch_keybuf_init(struct keybuf *); 
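A rough usage sketch of the map interface declared above (illustrative only: struct sector_count, count_sectors_fn and count_cached_sectors are invented names, not part of this patch). Callers embed a struct btree_op as the first member of a private struct, recover it with container_of() in the callback, and return MAP_CONTINUE to keep walking or MAP_DONE to stop, much like refill_keybuf_fn() earlier in this diff:

	struct sector_count {
		struct btree_op	op;	/* must be the first member */
		unsigned	inode;
		uint64_t	sectors;
	};

	static int count_sectors_fn(struct btree_op *b_op, struct btree *b,
				    struct bkey *k)
	{
		struct sector_count *s = container_of(b_op, struct sector_count, op);

		if (KEY_INODE(k) > s->inode)
			return MAP_DONE;	/* walked past our inode, stop */

		if (KEY_INODE(k) == s->inode)
			s->sectors += KEY_SIZE(k);

		return MAP_CONTINUE;
	}

	static uint64_t count_cached_sectors(struct cache_set *c, unsigned inode)
	{
		struct sector_count s = { .inode = inode };

		/* -1: never take write locks, as bch_refill_keybuf() does */
		bch_btree_op_init(&s.op, -1);

		bch_btree_map_keys(&s.op, c, &KEY(inode, 0, 0),
				   count_sectors_fn, 0);

		return s.sectors;
	}

bch_btree_insert() (via struct btree_insert_op) and bch_refill_keybuf() (via struct refill) in this patch are built on exactly this pattern.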
-void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *, -		       keybuf_pred_fn *); +void bch_refill_keybuf(struct cache_set *, struct keybuf *, +		       struct bkey *, keybuf_pred_fn *);  bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,  				  struct bkey *);  void bch_keybuf_del(struct keybuf *, struct keybuf_key *); diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index 9aba2017f0d..7a228de95fd 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c @@ -11,47 +11,12 @@  #include "closure.h" -void closure_queue(struct closure *cl) -{ -	struct workqueue_struct *wq = cl->wq; -	if (wq) { -		INIT_WORK(&cl->work, cl->work.func); -		BUG_ON(!queue_work(wq, &cl->work)); -	} else -		cl->fn(cl); -} -EXPORT_SYMBOL_GPL(closure_queue); - -#define CL_FIELD(type, field)					\ -	case TYPE_ ## type:					\ -	return &container_of(cl, struct type, cl)->field - -static struct closure_waitlist *closure_waitlist(struct closure *cl) -{ -	switch (cl->type) { -		CL_FIELD(closure_with_waitlist, wait); -		CL_FIELD(closure_with_waitlist_and_timer, wait); -	default: -		return NULL; -	} -} - -static struct timer_list *closure_timer(struct closure *cl) -{ -	switch (cl->type) { -		CL_FIELD(closure_with_timer, timer); -		CL_FIELD(closure_with_waitlist_and_timer, timer); -	default: -		return NULL; -	} -} -  static inline void closure_put_after_sub(struct closure *cl, int flags)  {  	int r = flags & CLOSURE_REMAINING_MASK;  	BUG_ON(flags & CLOSURE_GUARD_MASK); -	BUG_ON(!r && (flags & ~(CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING))); +	BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));  	/* Must deliver precisely one wakeup */  	if (r == 1 && (flags & CLOSURE_SLEEPING)) @@ -59,23 +24,15 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)  	if (!r) {  		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) { -			/* CLOSURE_BLOCKING might be set - clear it */  			atomic_set(&cl->remaining,  				   CLOSURE_REMAINING_INITIALIZER);  			closure_queue(cl);  		} else {  			struct closure *parent = cl->parent; -			struct closure_waitlist *wait = closure_waitlist(cl);  			closure_fn *destructor = cl->fn;  			closure_debug_destroy(cl); -			smp_mb(); -			atomic_set(&cl->remaining, -1); - -			if (wait) -				closure_wake_up(wait); -  			if (destructor)  				destructor(cl); @@ -90,21 +47,20 @@ void closure_sub(struct closure *cl, int v)  {  	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));  } -EXPORT_SYMBOL_GPL(closure_sub); +EXPORT_SYMBOL(closure_sub); +/** + * closure_put - decrement a closure's refcount + */  void closure_put(struct closure *cl)  {  	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));  } -EXPORT_SYMBOL_GPL(closure_put); - -static void set_waiting(struct closure *cl, unsigned long f) -{ -#ifdef CONFIG_BCACHE_CLOSURES_DEBUG -	cl->waiting_on = f; -#endif -} +EXPORT_SYMBOL(closure_put); +/** + * closure_wake_up - wake up all closures on a wait list, without memory barrier + */  void __closure_wake_up(struct closure_waitlist *wait_list)  {  	struct llist_node *list; @@ -129,27 +85,34 @@ void __closure_wake_up(struct closure_waitlist *wait_list)  		cl = container_of(reverse, struct closure, list);  		reverse = llist_next(reverse); -		set_waiting(cl, 0); +		closure_set_waiting(cl, 0);  		closure_sub(cl, CLOSURE_WAITING + 1);  	}  } -EXPORT_SYMBOL_GPL(__closure_wake_up); +EXPORT_SYMBOL(__closure_wake_up); -bool closure_wait(struct closure_waitlist *list, struct closure *cl) +/** + * closure_wait - add a closure to a 
waitlist + * + * @waitlist will own a ref on @cl, which will be released when + * closure_wake_up() is called on @waitlist. + * + */ +bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)  {  	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)  		return false; -	set_waiting(cl, _RET_IP_); +	closure_set_waiting(cl, _RET_IP_);  	atomic_add(CLOSURE_WAITING + 1, &cl->remaining); -	llist_add(&cl->list, &list->list); +	llist_add(&cl->list, &waitlist->list);  	return true;  } -EXPORT_SYMBOL_GPL(closure_wait); +EXPORT_SYMBOL(closure_wait);  /** - * closure_sync() - sleep until a closure a closure has nothing left to wait on + * closure_sync - sleep until a closure a closure has nothing left to wait on   *   * Sleeps until the refcount hits 1 - the thread that's running the closure owns   * the last refcount. @@ -169,93 +132,7 @@ void closure_sync(struct closure *cl)  	__closure_end_sleep(cl);  } -EXPORT_SYMBOL_GPL(closure_sync); - -/** - * closure_trylock() - try to acquire the closure, without waiting - * @cl:		closure to lock - * - * Returns true if the closure was succesfully locked. - */ -bool closure_trylock(struct closure *cl, struct closure *parent) -{ -	if (atomic_cmpxchg(&cl->remaining, -1, -			   CLOSURE_REMAINING_INITIALIZER) != -1) -		return false; - -	closure_set_ret_ip(cl); - -	smp_mb(); -	cl->parent = parent; -	if (parent) -		closure_get(parent); - -	closure_debug_create(cl); -	return true; -} -EXPORT_SYMBOL_GPL(closure_trylock); - -void __closure_lock(struct closure *cl, struct closure *parent, -		    struct closure_waitlist *wait_list) -{ -	struct closure wait; -	closure_init_stack(&wait); - -	while (1) { -		if (closure_trylock(cl, parent)) -			return; - -		closure_wait_event_sync(wait_list, &wait, -					atomic_read(&cl->remaining) == -1); -	} -} -EXPORT_SYMBOL_GPL(__closure_lock); - -static void closure_delay_timer_fn(unsigned long data) -{ -	struct closure *cl = (struct closure *) data; -	closure_sub(cl, CLOSURE_TIMER + 1); -} - -void do_closure_timer_init(struct closure *cl) -{ -	struct timer_list *timer = closure_timer(cl); - -	init_timer(timer); -	timer->data	= (unsigned long) cl; -	timer->function = closure_delay_timer_fn; -} -EXPORT_SYMBOL_GPL(do_closure_timer_init); - -bool __closure_delay(struct closure *cl, unsigned long delay, -		     struct timer_list *timer) -{ -	if (atomic_read(&cl->remaining) & CLOSURE_TIMER) -		return false; - -	BUG_ON(timer_pending(timer)); - -	timer->expires	= jiffies + delay; - -	atomic_add(CLOSURE_TIMER + 1, &cl->remaining); -	add_timer(timer); -	return true; -} -EXPORT_SYMBOL_GPL(__closure_delay); - -void __closure_flush(struct closure *cl, struct timer_list *timer) -{ -	if (del_timer(timer)) -		closure_sub(cl, CLOSURE_TIMER + 1); -} -EXPORT_SYMBOL_GPL(__closure_flush); - -void __closure_flush_sync(struct closure *cl, struct timer_list *timer) -{ -	if (del_timer_sync(timer)) -		closure_sub(cl, CLOSURE_TIMER + 1); -} -EXPORT_SYMBOL_GPL(__closure_flush_sync); +EXPORT_SYMBOL(closure_sync);  #ifdef CONFIG_BCACHE_CLOSURES_DEBUG @@ -273,7 +150,7 @@ void closure_debug_create(struct closure *cl)  	list_add(&cl->all, &closure_list);  	spin_unlock_irqrestore(&closure_list_lock, flags);  } -EXPORT_SYMBOL_GPL(closure_debug_create); +EXPORT_SYMBOL(closure_debug_create);  void closure_debug_destroy(struct closure *cl)  { @@ -286,7 +163,7 @@ void closure_debug_destroy(struct closure *cl)  	list_del(&cl->all);  	spin_unlock_irqrestore(&closure_list_lock, flags);  } -EXPORT_SYMBOL_GPL(closure_debug_destroy); 
+EXPORT_SYMBOL(closure_debug_destroy);  static struct dentry *debug; @@ -304,14 +181,12 @@ static int debug_seq_show(struct seq_file *f, void *data)  			   cl, (void *) cl->ip, cl->fn, cl->parent,  			   r & CLOSURE_REMAINING_MASK); -		seq_printf(f, "%s%s%s%s%s%s\n", +		seq_printf(f, "%s%s%s%s\n",  			   test_bit(WORK_STRUCT_PENDING,  				    work_data_bits(&cl->work)) ? "Q" : "",  			   r & CLOSURE_RUNNING	? "R" : "", -			   r & CLOSURE_BLOCKING	? "B" : "",  			   r & CLOSURE_STACK	? "S" : "", -			   r & CLOSURE_SLEEPING	? "Sl" : "", -			   r & CLOSURE_TIMER	? "T" : ""); +			   r & CLOSURE_SLEEPING	? "Sl" : "");  		if (r & CLOSURE_WAITING)  			seq_printf(f, " W %pF\n", diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index 00039924ea9..a08e3eeac3c 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h @@ -72,30 +72,6 @@   * closure - _always_ use continue_at(). Doing so consistently will help   * eliminate an entire class of particularly pernicious races.   * - * For a closure to wait on an arbitrary event, we need to introduce waitlists: - * - * struct closure_waitlist list; - * closure_wait_event(list, cl, condition); - * closure_wake_up(wait_list); - * - * These work analagously to wait_event() and wake_up() - except that instead of - * operating on the current thread (for wait_event()) and lists of threads, they - * operate on an explicit closure and lists of closures. - * - * Because it's a closure we can now wait either synchronously or - * asynchronously. closure_wait_event() returns the current value of the - * condition, and if it returned false continue_at() or closure_sync() can be - * used to wait for it to become true. - * - * It's useful for waiting on things when you can't sleep in the context in - * which you must check the condition (perhaps a spinlock held, or you might be - * beneath generic_make_request() - in which case you can't sleep on IO). - * - * closure_wait_event() will wait either synchronously or asynchronously, - * depending on whether the closure is in blocking mode or not. You can pick a - * mode explicitly with closure_wait_event_sync() and - * closure_wait_event_async(), which do just what you might expect. - *   * Lastly, you might have a wait list dedicated to a specific event, and have no   * need for specifying the condition - you just want to wait until someone runs   * closure_wake_up() on the appropriate wait list. In that case, just use @@ -121,55 +97,6 @@   * All this implies that a closure should typically be embedded in a particular   * struct (which its refcount will normally control the lifetime of), and that   * struct can very much be thought of as a stack frame. - * - * Locking: - * - * Closures are based on work items but they can be thought of as more like - * threads - in that like threads and unlike work items they have a well - * defined lifetime; they are created (with closure_init()) and eventually - * complete after a continue_at(cl, NULL, NULL). - * - * Suppose you've got some larger structure with a closure embedded in it that's - * used for periodically doing garbage collection. You only want one garbage - * collection happening at a time, so the natural thing to do is protect it with - * a lock. However, it's difficult to use a lock protecting a closure correctly - * because the unlock should come after the last continue_to() (additionally, if - * you're using the closure asynchronously a mutex won't work since a mutex has - * to be unlocked by the same process that locked it). 
- * - * So to make it less error prone and more efficient, we also have the ability - * to use closures as locks: - * - * closure_init_unlocked(); - * closure_trylock(); - * - * That's all we need for trylock() - the last closure_put() implicitly unlocks - * it for you.  But for closure_lock(), we also need a wait list: - * - * struct closure_with_waitlist frobnicator_cl; - * - * closure_init_unlocked(&frobnicator_cl); - * closure_lock(&frobnicator_cl); - * - * A closure_with_waitlist embeds a closure and a wait list - much like struct - * delayed_work embeds a work item and a timer_list. The important thing is, use - * it exactly like you would a regular closure and closure_put() will magically - * handle everything for you. - * - * We've got closures that embed timers, too. They're called, appropriately - * enough: - * struct closure_with_timer; - * - * This gives you access to closure_delay(). It takes a refcount for a specified - * number of jiffies - you could then call closure_sync() (for a slightly - * convoluted version of msleep()) or continue_at() - which gives you the same - * effect as using a delayed work item, except you can reuse the work_struct - * already embedded in struct closure. - * - * Lastly, there's struct closure_with_waitlist_and_timer. It does what you - * probably expect, if you happen to need the features of both. (You don't - * really want to know how all this is implemented, but if I've done my job - * right you shouldn't have to care).   */  struct closure; @@ -179,19 +106,8 @@ struct closure_waitlist {  	struct llist_head	list;  }; -enum closure_type { -	TYPE_closure				= 0, -	TYPE_closure_with_waitlist		= 1, -	TYPE_closure_with_timer			= 2, -	TYPE_closure_with_waitlist_and_timer	= 3, -	MAX_CLOSURE_TYPE			= 3, -}; -  enum closure_state {  	/* -	 * CLOSURE_BLOCKING: Causes closure_wait_event() to block, instead of -	 * waiting asynchronously -	 *  	 * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by  	 * the thread that owns the closure, and cleared by the thread that's  	 * waking up the closure. @@ -200,10 +116,6 @@ enum closure_state {  	 * - indicates that cl->task is valid and closure_put() may wake it up.  	 * Only set or cleared by the thread that owns the closure.  	 * -	 * CLOSURE_TIMER: Analagous to CLOSURE_WAITING, indicates that a closure -	 * has an outstanding timer. Must be set by the thread that owns the -	 * closure, and cleared by the timer function when the timer goes off. -	 *  	 * The rest are for debugging and don't affect behaviour:  	 *  	 * CLOSURE_RUNNING: Set when a closure is running (i.e. 
by @@ -218,19 +130,17 @@ enum closure_state {  	 * closure with this flag set  	 */ -	CLOSURE_BITS_START	= (1 << 19), -	CLOSURE_DESTRUCTOR	= (1 << 19), -	CLOSURE_BLOCKING	= (1 << 21), -	CLOSURE_WAITING		= (1 << 23), -	CLOSURE_SLEEPING	= (1 << 25), -	CLOSURE_TIMER		= (1 << 27), +	CLOSURE_BITS_START	= (1 << 23), +	CLOSURE_DESTRUCTOR	= (1 << 23), +	CLOSURE_WAITING		= (1 << 25), +	CLOSURE_SLEEPING	= (1 << 27),  	CLOSURE_RUNNING		= (1 << 29),  	CLOSURE_STACK		= (1 << 31),  };  #define CLOSURE_GUARD_MASK					\ -	((CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING|CLOSURE_WAITING|	\ -	  CLOSURE_SLEEPING|CLOSURE_TIMER|CLOSURE_RUNNING|CLOSURE_STACK) << 1) +	((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING|	\ +	  CLOSURE_RUNNING|CLOSURE_STACK) << 1)  #define CLOSURE_REMAINING_MASK		(CLOSURE_BITS_START - 1)  #define CLOSURE_REMAINING_INITIALIZER	(1|CLOSURE_RUNNING) @@ -250,8 +160,6 @@ struct closure {  	atomic_t		remaining; -	enum closure_type	type; -  #ifdef CONFIG_BCACHE_CLOSURES_DEBUG  #define CLOSURE_MAGIC_DEAD	0xc054dead  #define CLOSURE_MAGIC_ALIVE	0xc054a11e @@ -263,54 +171,12 @@ struct closure {  #endif  }; -struct closure_with_waitlist { -	struct closure		cl; -	struct closure_waitlist	wait; -}; - -struct closure_with_timer { -	struct closure		cl; -	struct timer_list	timer; -}; - -struct closure_with_waitlist_and_timer { -	struct closure		cl; -	struct closure_waitlist	wait; -	struct timer_list	timer; -}; - -extern unsigned invalid_closure_type(void); - -#define __CLOSURE_TYPE(cl, _t)						\ -	  __builtin_types_compatible_p(typeof(cl), struct _t)		\ -		? TYPE_ ## _t :						\ - -#define __closure_type(cl)						\ -(									\ -	__CLOSURE_TYPE(cl, closure)					\ -	__CLOSURE_TYPE(cl, closure_with_waitlist)			\ -	__CLOSURE_TYPE(cl, closure_with_timer)				\ -	__CLOSURE_TYPE(cl, closure_with_waitlist_and_timer)		\ -	invalid_closure_type()						\ -) -  void closure_sub(struct closure *cl, int v);  void closure_put(struct closure *cl); -void closure_queue(struct closure *cl);  void __closure_wake_up(struct closure_waitlist *list);  bool closure_wait(struct closure_waitlist *list, struct closure *cl);  void closure_sync(struct closure *cl); -bool closure_trylock(struct closure *cl, struct closure *parent); -void __closure_lock(struct closure *cl, struct closure *parent, -		    struct closure_waitlist *wait_list); - -void do_closure_timer_init(struct closure *cl); -bool __closure_delay(struct closure *cl, unsigned long delay, -		     struct timer_list *timer); -void __closure_flush(struct closure *cl, struct timer_list *timer); -void __closure_flush_sync(struct closure *cl, struct timer_list *timer); -  #ifdef CONFIG_BCACHE_CLOSURES_DEBUG  void closure_debug_init(void); @@ -339,200 +205,97 @@ static inline void closure_set_ret_ip(struct closure *cl)  #endif  } -static inline void closure_get(struct closure *cl) +static inline void closure_set_waiting(struct closure *cl, unsigned long f)  {  #ifdef CONFIG_BCACHE_CLOSURES_DEBUG -	BUG_ON((atomic_inc_return(&cl->remaining) & -		CLOSURE_REMAINING_MASK) <= 1); -#else -	atomic_inc(&cl->remaining); +	cl->waiting_on = f;  #endif  } -static inline void closure_set_stopped(struct closure *cl) +static inline void __closure_end_sleep(struct closure *cl)  { -	atomic_sub(CLOSURE_RUNNING, &cl->remaining); +	__set_current_state(TASK_RUNNING); + +	if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING) +		atomic_sub(CLOSURE_SLEEPING, &cl->remaining);  } -static inline bool closure_is_stopped(struct closure *cl) +static inline void __closure_start_sleep(struct closure *cl)  { -	return 
!(atomic_read(&cl->remaining) & CLOSURE_RUNNING); +	closure_set_ip(cl); +	cl->task = current; +	set_current_state(TASK_UNINTERRUPTIBLE); + +	if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING)) +		atomic_add(CLOSURE_SLEEPING, &cl->remaining);  } -static inline bool closure_is_unlocked(struct closure *cl) +static inline void closure_set_stopped(struct closure *cl)  { -	return atomic_read(&cl->remaining) == -1; +	atomic_sub(CLOSURE_RUNNING, &cl->remaining);  } -static inline void do_closure_init(struct closure *cl, struct closure *parent, -				   bool running) +static inline void set_closure_fn(struct closure *cl, closure_fn *fn, +				  struct workqueue_struct *wq)  { -	switch (cl->type) { -	case TYPE_closure_with_timer: -	case TYPE_closure_with_waitlist_and_timer: -		do_closure_timer_init(cl); -	default: -		break; -	} - -	cl->parent = parent; -	if (parent) -		closure_get(parent); - -	if (running) { -		closure_debug_create(cl); -		atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER); -	} else -		atomic_set(&cl->remaining, -1); - +	BUG_ON(object_is_on_stack(cl));  	closure_set_ip(cl); +	cl->fn = fn; +	cl->wq = wq; +	/* between atomic_dec() in closure_put() */ +	smp_mb__before_atomic();  } -/* - * Hack to get at the embedded closure if there is one, by doing an unsafe cast: - * the result of __closure_type() is thrown away, it's used merely for type - * checking. - */ -#define __to_internal_closure(cl)				\ -({								\ -	BUILD_BUG_ON(__closure_type(*cl) > MAX_CLOSURE_TYPE);	\ -	(struct closure *) cl;					\ -}) - -#define closure_init_type(cl, parent, running)			\ -do {								\ -	struct closure *_cl = __to_internal_closure(cl);	\ -	_cl->type = __closure_type(*(cl));			\ -	do_closure_init(_cl, parent, running);			\ -} while (0) +static inline void closure_queue(struct closure *cl) +{ +	struct workqueue_struct *wq = cl->wq; +	if (wq) { +		INIT_WORK(&cl->work, cl->work.func); +		BUG_ON(!queue_work(wq, &cl->work)); +	} else +		cl->fn(cl); +}  /** - * __closure_init() - Initialize a closure, skipping the memset() - * - * May be used instead of closure_init() when memory has already been zeroed. + * closure_get - increment a closure's refcount   */ -#define __closure_init(cl, parent)				\ -	closure_init_type(cl, parent, true) +static inline void closure_get(struct closure *cl) +{ +#ifdef CONFIG_BCACHE_CLOSURES_DEBUG +	BUG_ON((atomic_inc_return(&cl->remaining) & +		CLOSURE_REMAINING_MASK) <= 1); +#else +	atomic_inc(&cl->remaining); +#endif +}  /** - * closure_init() - Initialize a closure, setting the refcount to 1 + * closure_init - Initialize a closure, setting the refcount to 1   * @cl:		closure to initialize   * @parent:	parent of the new closure. cl will take a refcount on it for its   *		lifetime; may be NULL.   */ -#define closure_init(cl, parent)				\ -do {								\ -	memset((cl), 0, sizeof(*(cl)));				\ -	__closure_init(cl, parent);				\ -} while (0) - -static inline void closure_init_stack(struct closure *cl) +static inline void closure_init(struct closure *cl, struct closure *parent)  {  	memset(cl, 0, sizeof(struct closure)); -	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER| -		   CLOSURE_BLOCKING|CLOSURE_STACK); -} - -/** - * closure_init_unlocked() - Initialize a closure but leave it unlocked. - * @cl:		closure to initialize - * - * For when the closure will be used as a lock. The closure may not be used - * until after a closure_lock() or closure_trylock(). 
- */ -#define closure_init_unlocked(cl)				\ -do {								\ -	memset((cl), 0, sizeof(*(cl)));				\ -	closure_init_type(cl, NULL, false);			\ -} while (0) - -/** - * closure_lock() - lock and initialize a closure. - * @cl:		the closure to lock - * @parent:	the new parent for this closure - * - * The closure must be of one of the types that has a waitlist (otherwise we - * wouldn't be able to sleep on contention). - * - * @parent has exactly the same meaning as in closure_init(); if non null, the - * closure will take a reference on @parent which will be released when it is - * unlocked. - */ -#define closure_lock(cl, parent)				\ -	__closure_lock(__to_internal_closure(cl), parent, &(cl)->wait) - -/** - * closure_delay() - delay some number of jiffies - * @cl:		the closure that will sleep - * @delay:	the delay in jiffies - * - * Takes a refcount on @cl which will be released after @delay jiffies; this may - * be used to have a function run after a delay with continue_at(), or - * closure_sync() may be used for a convoluted version of msleep(). - */ -#define closure_delay(cl, delay)			\ -	__closure_delay(__to_internal_closure(cl), delay, &(cl)->timer) - -#define closure_flush(cl)				\ -	__closure_flush(__to_internal_closure(cl), &(cl)->timer) - -#define closure_flush_sync(cl)				\ -	__closure_flush_sync(__to_internal_closure(cl), &(cl)->timer) - -static inline void __closure_end_sleep(struct closure *cl) -{ -	__set_current_state(TASK_RUNNING); +	cl->parent = parent; +	if (parent) +		closure_get(parent); -	if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING) -		atomic_sub(CLOSURE_SLEEPING, &cl->remaining); -} +	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER); -static inline void __closure_start_sleep(struct closure *cl) -{ +	closure_debug_create(cl);  	closure_set_ip(cl); -	cl->task = current; -	set_current_state(TASK_UNINTERRUPTIBLE); - -	if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING)) -		atomic_add(CLOSURE_SLEEPING, &cl->remaining); -} - -/** - * closure_blocking() - returns true if the closure is in blocking mode. - * - * If a closure is in blocking mode, closure_wait_event() will sleep until the - * condition is true instead of waiting asynchronously. - */ -static inline bool closure_blocking(struct closure *cl) -{ -	return atomic_read(&cl->remaining) & CLOSURE_BLOCKING; -} - -/** - * set_closure_blocking() - put a closure in blocking mode. - * - * If a closure is in blocking mode, closure_wait_event() will sleep until the - * condition is true instead of waiting asynchronously. - * - * Not thread safe - can only be called by the thread running the closure. - */ -static inline void set_closure_blocking(struct closure *cl) -{ -	if (!closure_blocking(cl)) -		atomic_add(CLOSURE_BLOCKING, &cl->remaining);  } -/* - * Not thread safe - can only be called by the thread running the closure. - */ -static inline void clear_closure_blocking(struct closure *cl) +static inline void closure_init_stack(struct closure *cl)  { -	if (closure_blocking(cl)) -		atomic_sub(CLOSURE_BLOCKING, &cl->remaining); +	memset(cl, 0, sizeof(struct closure)); +	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);  }  /** - * closure_wake_up() - wake up all closures on a wait list. + * closure_wake_up - wake up all closures on a wait list.   
*/  static inline void closure_wake_up(struct closure_waitlist *list)  { @@ -540,96 +303,19 @@ static inline void closure_wake_up(struct closure_waitlist *list)  	__closure_wake_up(list);  } -/* - * Wait on an event, synchronously or asynchronously - analogous to wait_event() - * but for closures. - * - * The loop is oddly structured so as to avoid a race; we must check the - * condition again after we've added ourself to the waitlist. We know if we were - * already on the waitlist because closure_wait() returns false; thus, we only - * schedule or break if closure_wait() returns false. If it returns true, we - * just loop again - rechecking the condition. - * - * The __closure_wake_up() is necessary because we may race with the event - * becoming true; i.e. we see event false -> wait -> recheck condition, but the - * thread that made the event true may have called closure_wake_up() before we - * added ourself to the wait list. - * - * We have to call closure_sync() at the end instead of just - * __closure_end_sleep() because a different thread might've called - * closure_wake_up() before us and gotten preempted before they dropped the - * refcount on our closure. If this was a stack allocated closure, that would be - * bad. - */ -#define __closure_wait_event(list, cl, condition, _block)		\ -({									\ -	bool block = _block;						\ -	typeof(condition) ret;						\ -									\ -	while (1) {							\ -		ret = (condition);					\ -		if (ret) {						\ -			__closure_wake_up(list);			\ -			if (block)					\ -				closure_sync(cl);			\ -									\ -			break;						\ -		}							\ -									\ -		if (block)						\ -			__closure_start_sleep(cl);			\ -									\ -		if (!closure_wait(list, cl)) {				\ -			if (!block)					\ -				break;					\ -									\ -			schedule();					\ -		}							\ -	}								\ -									\ -	ret;								\ -}) -  /** - * closure_wait_event() - wait on a condition, synchronously or asynchronously. - * @list:	the wait list to wait on - * @cl:		the closure that is doing the waiting - * @condition:	a C expression for the event to wait for - * - * If the closure is in blocking mode, sleeps until the @condition evaluates to - * true - exactly like wait_event(). + * continue_at - jump to another function with barrier   * - * If the closure is not in blocking mode, waits asynchronously; if the - * condition is currently false the @cl is put onto @list and returns. @list - * owns a refcount on @cl; closure_sync() or continue_at() may be used later to - * wait for another thread to wake up @list, which drops the refcount on @cl. + * After @cl is no longer waiting on anything (i.e. all outstanding refs have + * been dropped with closure_put()), it will resume execution at @fn running out + * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).   * - * Returns the value of @condition; @cl will be on @list iff @condition was - * false. + * NOTE: This macro expands to a return in the calling function!   * - * closure_wake_up(@list) must be called after changing any variable that could - * cause @condition to become true. + * This is because after calling continue_at() you no longer have a ref on @cl, + * and whatever @cl owns may be freed out from under you - a running closure fn + * has a ref on its own closure which continue_at() drops.   
*/ -#define closure_wait_event(list, cl, condition)				\ -	__closure_wait_event(list, cl, condition, closure_blocking(cl)) - -#define closure_wait_event_async(list, cl, condition)			\ -	__closure_wait_event(list, cl, condition, false) - -#define closure_wait_event_sync(list, cl, condition)			\ -	__closure_wait_event(list, cl, condition, true) - -static inline void set_closure_fn(struct closure *cl, closure_fn *fn, -				  struct workqueue_struct *wq) -{ -	BUG_ON(object_is_on_stack(cl)); -	closure_set_ip(cl); -	cl->fn = fn; -	cl->wq = wq; -	/* between atomic_dec() in closure_put() */ -	smp_mb__before_atomic_dec(); -} -  #define continue_at(_cl, _fn, _wq)					\  do {									\  	set_closure_fn(_cl, _fn, _wq);					\ @@ -637,15 +323,44 @@ do {									\  	return;								\  } while (0) +/** + * closure_return - finish execution of a closure + * + * This is used to indicate that @cl is finished: when all outstanding refs on + * @cl have been dropped @cl's ref on its parent closure (as passed to + * closure_init()) will be dropped, if one was specified - thus this can be + * thought of as returning to the parent closure. + */  #define closure_return(_cl)	continue_at((_cl), NULL, NULL) +/** + * continue_at_nobarrier - jump to another function without barrier + * + * Causes @fn to be executed out of @cl, in @wq context (or called directly if + * @wq is NULL). + * + * NOTE: like continue_at(), this macro expands to a return in the caller! + * + * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn, + * thus it's not safe to touch anything protected by @cl after a + * continue_at_nobarrier(). + */  #define continue_at_nobarrier(_cl, _fn, _wq)				\  do {									\  	set_closure_fn(_cl, _fn, _wq);					\ -	closure_queue(cl);						\ +	closure_queue(_cl);						\  	return;								\  } while (0) +/** + * closure_return - finish execution of a closure, with destructor + * + * Works like closure_return(), except @destructor will be called when all + * outstanding refs on @cl have been dropped; @destructor may be used to safely + * free the memory occupied by @cl, and it is called with the ref on the parent + * closure still held - so @destructor could safely return an item to a + * freelist protected by @cl's parent. + */  #define closure_return_with_destructor(_cl, _destructor)		\  do {									\  	set_closure_fn(_cl, _destructor, NULL);				\ @@ -653,6 +368,13 @@ do {									\  	return;								\  } while (0) +/** + * closure_call - execute @fn out of a new, uninitialized closure + * + * Typically used when running out of one closure, and we want to run @fn + * asynchronously out of a new closure - @parent will then wait for @cl to + * finish. 
+ */  static inline void closure_call(struct closure *cl, closure_fn fn,  				struct workqueue_struct *wq,  				struct closure *parent) @@ -661,12 +383,4 @@ static inline void closure_call(struct closure *cl, closure_fn fn,  	continue_at_nobarrier(cl, fn, wq);  } -static inline void closure_trylock_call(struct closure *cl, closure_fn fn, -					struct workqueue_struct *wq, -					struct closure *parent) -{ -	if (closure_trylock(cl, parent)) -		continue_at_nobarrier(cl, fn, wq); -} -  #endif /* _LINUX_CLOSURE_H */ diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 88e6411eab4..8b1f1d5c181 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -8,7 +8,7 @@  #include "bcache.h"  #include "btree.h"  #include "debug.h" -#include "request.h" +#include "extents.h"  #include <linux/console.h>  #include <linux/debugfs.h> @@ -18,305 +18,130 @@  static struct dentry *debug; -const char *bch_ptr_status(struct cache_set *c, const struct bkey *k) -{ -	unsigned i; - -	for (i = 0; i < KEY_PTRS(k); i++) -		if (ptr_available(c, k, i)) { -			struct cache *ca = PTR_CACHE(c, k, i); -			size_t bucket = PTR_BUCKET_NR(c, k, i); -			size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); - -			if (KEY_SIZE(k) + r > c->sb.bucket_size) -				return "bad, length too big"; -			if (bucket <  ca->sb.first_bucket) -				return "bad, short offset"; -			if (bucket >= ca->sb.nbuckets) -				return "bad, offset past end of device"; -			if (ptr_stale(c, k, i)) -				return "stale"; -		} - -	if (!bkey_cmp(k, &ZERO_KEY)) -		return "bad, null key"; -	if (!KEY_PTRS(k)) -		return "bad, no pointers"; -	if (!KEY_SIZE(k)) -		return "zeroed key"; -	return ""; -} - -int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k) -{ -	unsigned i = 0; -	char *out = buf, *end = buf + size; - -#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__)) - -	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k)); - -	if (KEY_PTRS(k)) -		while (1) { -			p("%llu:%llu gen %llu", -			  PTR_DEV(k, i), PTR_OFFSET(k, i), PTR_GEN(k, i)); - -			if (++i == KEY_PTRS(k)) -				break; - -			p(", "); -		} - -	p("]"); - -	if (KEY_DIRTY(k)) -		p(" dirty"); -	if (KEY_CSUM(k)) -		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]); -#undef p -	return out - buf; -} - -int bch_btree_to_text(char *buf, size_t size, const struct btree *b) -{ -	return scnprintf(buf, size, "%zu level %i/%i", -			 PTR_BUCKET_NR(b->c, &b->key, 0), -			 b->level, b->c->root ? b->c->root->level : -1); -} - -#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG) - -static bool skipped_backwards(struct btree *b, struct bkey *k) -{ -	return bkey_cmp(k, (!b->level) -			? 
&START_KEY(bkey_next(k)) -			: bkey_next(k)) > 0; -} - -static void dump_bset(struct btree *b, struct bset *i) -{ -	struct bkey *k; -	unsigned j; -	char buf[80]; - -	for (k = i->start; k < end(i); k = bkey_next(k)) { -		bch_bkey_to_text(buf, sizeof(buf), k); -		printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b), -		       (uint64_t *) k - i->d, i->keys, buf); - -		for (j = 0; j < KEY_PTRS(k); j++) { -			size_t n = PTR_BUCKET_NR(b->c, k, j); -			printk(" bucket %zu", n); - -			if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets) -				printk(" prio %i", -				       PTR_BUCKET(b->c, k, j)->prio); -		} - -		printk(" %s\n", bch_ptr_status(b->c, k)); - -		if (bkey_next(k) < end(i) && -		    skipped_backwards(b, k)) -			printk(KERN_ERR "Key skipped backwards\n"); -	} -} - -#endif -  #ifdef CONFIG_BCACHE_DEBUG -void bch_btree_verify(struct btree *b, struct bset *new) +#define for_each_written_bset(b, start, i)				\ +	for (i = (start);						\ +	     (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\ +	     i->seq == (start)->seq;					\ +	     i = (void *) i + set_blocks(i, block_bytes(b->c)) *	\ +		 block_bytes(b->c)) + +void bch_btree_verify(struct btree *b)  {  	struct btree *v = b->c->verify_data; -	struct closure cl; -	closure_init_stack(&cl); +	struct bset *ondisk, *sorted, *inmemory; +	struct bio *bio; -	if (!b->c->verify) +	if (!b->c->verify || !b->c->verify_ondisk)  		return; -	closure_wait_event(&b->io.wait, &cl, -			   atomic_read(&b->io.cl.remaining) == -1); - +	down(&b->io_mutex);  	mutex_lock(&b->c->verify_lock); +	ondisk = b->c->verify_ondisk; +	sorted = b->c->verify_data->keys.set->data; +	inmemory = b->keys.set->data; +  	bkey_copy(&v->key, &b->key);  	v->written = 0;  	v->level = b->level; +	v->keys.ops = b->keys.ops; + +	bio = bch_bbio_alloc(b->c); +	bio->bi_bdev		= PTR_CACHE(b->c, &b->key, 0)->bdev; +	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0); +	bio->bi_iter.bi_size	= KEY_SIZE(&v->key) << 9; +	bch_bio_map(bio, sorted); -	bch_btree_node_read(v); -	closure_wait_event(&v->io.wait, &cl, -			   atomic_read(&b->io.cl.remaining) == -1); +	submit_bio_wait(REQ_META|READ_SYNC, bio); +	bch_bbio_free(bio, b->c); -	if (new->keys != v->sets[0].data->keys || -	    memcmp(new->start, -		   v->sets[0].data->start, -		   (void *) end(new) - (void *) new->start)) { -		unsigned i, j; +	memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9); + +	bch_btree_node_read_done(v); +	sorted = v->keys.set->data; + +	if (inmemory->keys != sorted->keys || +	    memcmp(inmemory->start, +		   sorted->start, +		   (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) { +		struct bset *i; +		unsigned j;  		console_lock(); -		printk(KERN_ERR "*** original memory node:\n"); -		for (i = 0; i <= b->nsets; i++) -			dump_bset(b, b->sets[i].data); +		printk(KERN_ERR "*** in memory:\n"); +		bch_dump_bset(&b->keys, inmemory, 0); + +		printk(KERN_ERR "*** read back in:\n"); +		bch_dump_bset(&v->keys, sorted, 0); -		printk(KERN_ERR "*** sorted memory node:\n"); -		dump_bset(b, new); +		for_each_written_bset(b, ondisk, i) { +			unsigned block = ((void *) i - (void *) ondisk) / +				block_bytes(b->c); + +			printk(KERN_ERR "*** on disk block %u:\n", block); +			bch_dump_bset(&b->keys, i, block); +		} -		printk(KERN_ERR "*** on disk node:\n"); -		dump_bset(v, v->sets[0].data); +		printk(KERN_ERR "*** block %zu not written\n", +		       ((void *) i - (void *) ondisk) / block_bytes(b->c)); -		for (j = 0; j < new->keys; j++) -			if (new->d[j] != v->sets[0].data->d[j]) +		for (j = 0; j < inmemory->keys; 
j++) +			if (inmemory->d[j] != sorted->d[j])  				break; +		printk(KERN_ERR "b->written %u\n", b->written); +  		console_unlock();  		panic("verify failed at %u\n", j);  	}  	mutex_unlock(&b->c->verify_lock); +	up(&b->io_mutex);  } -static void data_verify_endio(struct bio *bio, int error) -{ -	struct closure *cl = bio->bi_private; -	closure_put(cl); -} - -void bch_data_verify(struct search *s) +void bch_data_verify(struct cached_dev *dc, struct bio *bio)  {  	char name[BDEVNAME_SIZE]; -	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); -	struct closure *cl = &s->cl;  	struct bio *check; -	struct bio_vec *bv; +	struct bio_vec bv, *bv2; +	struct bvec_iter iter;  	int i; -	if (!s->unaligned_bvec) -		bio_for_each_segment(bv, s->orig_bio, i) -			bv->bv_offset = 0, bv->bv_len = PAGE_SIZE; - -	check = bio_clone(s->orig_bio, GFP_NOIO); +	check = bio_clone(bio, GFP_NOIO);  	if (!check)  		return;  	if (bio_alloc_pages(check, GFP_NOIO))  		goto out_put; -	check->bi_rw		= READ_SYNC; -	check->bi_private	= cl; -	check->bi_end_io	= data_verify_endio; - -	closure_bio_submit(check, cl, &dc->disk); -	closure_sync(cl); +	submit_bio_wait(READ_SYNC, check); -	bio_for_each_segment(bv, s->orig_bio, i) { -		void *p1 = kmap(bv->bv_page); -		void *p2 = kmap(check->bi_io_vec[i].bv_page); +	bio_for_each_segment(bv, bio, iter) { +		void *p1 = kmap_atomic(bv.bv_page); +		void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page); -		if (memcmp(p1 + bv->bv_offset, -			   p2 + bv->bv_offset, -			   bv->bv_len)) -			printk(KERN_ERR -			       "bcache (%s): verify failed at sector %llu\n", -			       bdevname(dc->bdev, name), -			       (uint64_t) s->orig_bio->bi_sector); +		cache_set_err_on(memcmp(p1 + bv.bv_offset, +					p2 + bv.bv_offset, +					bv.bv_len), +				 dc->disk.c, +				 "verify failed at dev %s sector %llu", +				 bdevname(dc->bdev, name), +				 (uint64_t) bio->bi_iter.bi_sector); -		kunmap(bv->bv_page); -		kunmap(check->bi_io_vec[i].bv_page); +		kunmap_atomic(p1);  	} -	__bio_for_each_segment(bv, check, i, 0) -		__free_page(bv->bv_page); +	bio_for_each_segment_all(bv2, check, i) +		__free_page(bv2->bv_page);  out_put:  	bio_put(check);  }  #endif -#ifdef CONFIG_BCACHE_EDEBUG - -unsigned bch_count_data(struct btree *b) -{ -	unsigned ret = 0; -	struct btree_iter iter; -	struct bkey *k; - -	if (!b->level) -		for_each_key(b, k, &iter) -			ret += KEY_SIZE(k); -	return ret; -} - -static void vdump_bucket_and_panic(struct btree *b, const char *fmt, -				   va_list args) -{ -	unsigned i; -	char buf[80]; - -	console_lock(); - -	for (i = 0; i <= b->nsets; i++) -		dump_bset(b, b->sets[i].data); - -	vprintk(fmt, args); - -	console_unlock(); - -	bch_btree_to_text(buf, sizeof(buf), b); -	panic("at %s\n", buf); -} - -void bch_check_key_order_msg(struct btree *b, struct bset *i, -			     const char *fmt, ...) -{ -	struct bkey *k; - -	if (!i->keys) -		return; - -	for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k)) -		if (skipped_backwards(b, k)) { -			va_list args; -			va_start(args, fmt); - -			vdump_bucket_and_panic(b, fmt, args); -			va_end(args); -		} -} - -void bch_check_keys(struct btree *b, const char *fmt, ...) 
-{ -	va_list args; -	struct bkey *k, *p = NULL; -	struct btree_iter iter; - -	if (b->level) -		return; - -	for_each_key(b, k, &iter) { -		if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) { -			printk(KERN_ERR "Keys out of order:\n"); -			goto bug; -		} - -		if (bch_ptr_invalid(b, k)) -			continue; - -		if (p && bkey_cmp(p, &START_KEY(k)) > 0) { -			printk(KERN_ERR "Overlapping keys:\n"); -			goto bug; -		} -		p = k; -	} -	return; -bug: -	va_start(args, fmt); -	vdump_bucket_and_panic(b, fmt, args); -	va_end(args); -} - -#endif -  #ifdef CONFIG_DEBUG_FS  /* XXX: cache set refcounting */ @@ -361,7 +186,7 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,  		if (!w)  			break; -		bch_bkey_to_text(kbuf, sizeof(kbuf), &w->key); +		bch_extent_to_text(kbuf, sizeof(kbuf), &w->key);  		i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);  		bch_keybuf_del(&i->keys, w);  	} diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h index 1c39b5a2489..1f63c195d24 100644 --- a/drivers/md/bcache/debug.h +++ b/drivers/md/bcache/debug.h @@ -1,40 +1,27 @@  #ifndef _BCACHE_DEBUG_H  #define _BCACHE_DEBUG_H -/* Btree/bkey debug printing */ - -int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k); -int bch_btree_to_text(char *buf, size_t size, const struct btree *b); - -#ifdef CONFIG_BCACHE_EDEBUG - -unsigned bch_count_data(struct btree *); -void bch_check_key_order_msg(struct btree *, struct bset *, const char *, ...); -void bch_check_keys(struct btree *, const char *, ...); - -#define bch_check_key_order(b, i)			\ -	bch_check_key_order_msg(b, i, "keys out of order") -#define EBUG_ON(cond)		BUG_ON(cond) - -#else /* EDEBUG */ - -#define bch_count_data(b)				0 -#define bch_check_key_order(b, i)			do {} while (0) -#define bch_check_key_order_msg(b, i, ...)		do {} while (0) -#define bch_check_keys(b, ...)				do {} while (0) -#define EBUG_ON(cond)					do {} while (0) - -#endif +struct bio; +struct cached_dev; +struct cache_set;  #ifdef CONFIG_BCACHE_DEBUG -void bch_btree_verify(struct btree *, struct bset *); -void bch_data_verify(struct search *); +void bch_btree_verify(struct btree *); +void bch_data_verify(struct cached_dev *, struct bio *); + +#define expensive_debug_checks(c)	((c)->expensive_debug_checks) +#define key_merging_disabled(c)		((c)->key_merging_disabled) +#define bypass_torture_test(d)		((d)->bypass_torture_test)  #else /* DEBUG */ -static inline void bch_btree_verify(struct btree *b, struct bset *i) {} -static inline void bch_data_verify(struct search *s) {}; +static inline void bch_btree_verify(struct btree *b) {} +static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {} + +#define expensive_debug_checks(c)	0 +#define key_merging_disabled(c)		0 +#define bypass_torture_test(d)		0  #endif diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c new file mode 100644 index 00000000000..3a0de4cf977 --- /dev/null +++ b/drivers/md/bcache/extents.c @@ -0,0 +1,620 @@ +/* + * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com> + * + * Uses a block device as cache for other block devices; optimized for SSDs. + * All allocation is done in buckets, which should match the erase block size + * of the device. + * + * Buckets containing cached data are kept on a heap sorted by priority; + * bucket priority is increased on cache hit, and periodically all the buckets + * on the heap have their priority scaled down. This currently is just used as + * an LRU but in the future should allow for more intelligent heuristics. 
+ * + * Buckets have an 8 bit counter; freeing is accomplished by incrementing the + * counter. Garbage collection is used to remove stale pointers. + * + * Indexing is done via a btree; nodes are not necessarily fully sorted, rather + * as keys are inserted we only sort the pages that have not yet been written. + * When garbage collection is run, we resort the entire node. + * + * All configuration is done via sysfs; see Documentation/bcache.txt. + */ + +#include "bcache.h" +#include "btree.h" +#include "debug.h" +#include "extents.h" +#include "writeback.h" + +static void sort_key_next(struct btree_iter *iter, +			  struct btree_iter_set *i) +{ +	i->k = bkey_next(i->k); + +	if (i->k == i->end) +		*i = iter->data[--iter->used]; +} + +static bool bch_key_sort_cmp(struct btree_iter_set l, +			     struct btree_iter_set r) +{ +	int64_t c = bkey_cmp(l.k, r.k); + +	return c ? c > 0 : l.k < r.k; +} + +static bool __ptr_invalid(struct cache_set *c, const struct bkey *k) +{ +	unsigned i; + +	for (i = 0; i < KEY_PTRS(k); i++) +		if (ptr_available(c, k, i)) { +			struct cache *ca = PTR_CACHE(c, k, i); +			size_t bucket = PTR_BUCKET_NR(c, k, i); +			size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); + +			if (KEY_SIZE(k) + r > c->sb.bucket_size || +			    bucket <  ca->sb.first_bucket || +			    bucket >= ca->sb.nbuckets) +				return true; +		} + +	return false; +} + +/* Common among btree and extent ptrs */ + +static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k) +{ +	unsigned i; + +	for (i = 0; i < KEY_PTRS(k); i++) +		if (ptr_available(c, k, i)) { +			struct cache *ca = PTR_CACHE(c, k, i); +			size_t bucket = PTR_BUCKET_NR(c, k, i); +			size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); + +			if (KEY_SIZE(k) + r > c->sb.bucket_size) +				return "bad, length too big"; +			if (bucket <  ca->sb.first_bucket) +				return "bad, short offset"; +			if (bucket >= ca->sb.nbuckets) +				return "bad, offset past end of device"; +			if (ptr_stale(c, k, i)) +				return "stale"; +		} + +	if (!bkey_cmp(k, &ZERO_KEY)) +		return "bad, null key"; +	if (!KEY_PTRS(k)) +		return "bad, no pointers"; +	if (!KEY_SIZE(k)) +		return "zeroed key"; +	return ""; +} + +void bch_extent_to_text(char *buf, size_t size, const struct bkey *k) +{ +	unsigned i = 0; +	char *out = buf, *end = buf + size; + +#define p(...)	
(out += scnprintf(out, end - out, __VA_ARGS__)) + +	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k)); + +	for (i = 0; i < KEY_PTRS(k); i++) { +		if (i) +			p(", "); + +		if (PTR_DEV(k, i) == PTR_CHECK_DEV) +			p("check dev"); +		else +			p("%llu:%llu gen %llu", PTR_DEV(k, i), +			  PTR_OFFSET(k, i), PTR_GEN(k, i)); +	} + +	p("]"); + +	if (KEY_DIRTY(k)) +		p(" dirty"); +	if (KEY_CSUM(k)) +		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]); +#undef p +} + +static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k) +{ +	struct btree *b = container_of(keys, struct btree, keys); +	unsigned j; +	char buf[80]; + +	bch_extent_to_text(buf, sizeof(buf), k); +	printk(" %s", buf); + +	for (j = 0; j < KEY_PTRS(k); j++) { +		size_t n = PTR_BUCKET_NR(b->c, k, j); +		printk(" bucket %zu", n); + +		if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets) +			printk(" prio %i", +			       PTR_BUCKET(b->c, k, j)->prio); +	} + +	printk(" %s\n", bch_ptr_status(b->c, k)); +} + +/* Btree ptrs */ + +bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k) +{ +	char buf[80]; + +	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)) +		goto bad; + +	if (__ptr_invalid(c, k)) +		goto bad; + +	return false; +bad: +	bch_extent_to_text(buf, sizeof(buf), k); +	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k)); +	return true; +} + +static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k) +{ +	struct btree *b = container_of(bk, struct btree, keys); +	return __bch_btree_ptr_invalid(b->c, k); +} + +static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k) +{ +	unsigned i; +	char buf[80]; +	struct bucket *g; + +	if (mutex_trylock(&b->c->bucket_lock)) { +		for (i = 0; i < KEY_PTRS(k); i++) +			if (ptr_available(b->c, k, i)) { +				g = PTR_BUCKET(b->c, k, i); + +				if (KEY_DIRTY(k) || +				    g->prio != BTREE_PRIO || +				    (b->c->gc_mark_valid && +				     GC_MARK(g) != GC_MARK_METADATA)) +					goto err; +			} + +		mutex_unlock(&b->c->bucket_lock); +	} + +	return false; +err: +	mutex_unlock(&b->c->bucket_lock); +	bch_extent_to_text(buf, sizeof(buf), k); +	btree_bug(b, +"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu", +		  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), +		  g->prio, g->gen, g->last_gc, GC_MARK(g)); +	return true; +} + +static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k) +{ +	struct btree *b = container_of(bk, struct btree, keys); +	unsigned i; + +	if (!bkey_cmp(k, &ZERO_KEY) || +	    !KEY_PTRS(k) || +	    bch_ptr_invalid(bk, k)) +		return true; + +	for (i = 0; i < KEY_PTRS(k); i++) +		if (!ptr_available(b->c, k, i) || +		    ptr_stale(b->c, k, i)) +			return true; + +	if (expensive_debug_checks(b->c) && +	    btree_ptr_bad_expensive(b, k)) +		return true; + +	return false; +} + +static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk, +				       struct bkey *insert, +				       struct btree_iter *iter, +				       struct bkey *replace_key) +{ +	struct btree *b = container_of(bk, struct btree, keys); + +	if (!KEY_OFFSET(insert)) +		btree_current_write(b)->prio_blocked++; + +	return false; +} + +const struct btree_keys_ops bch_btree_keys_ops = { +	.sort_cmp	= bch_key_sort_cmp, +	.insert_fixup	= bch_btree_ptr_insert_fixup, +	.key_invalid	= bch_btree_ptr_invalid, +	.key_bad	= bch_btree_ptr_bad, +	.key_to_text	= bch_extent_to_text, +	.key_dump	= bch_bkey_dump, +}; + +/* Extents */ + +/* + * Returns true if l > r - unless l == r, in which case 
returns true if l is + * older than r. + * + * Necessary for btree_sort_fixup() - if there are multiple keys that compare + * equal in different sets, we have to process them newest to oldest. + */ +static bool bch_extent_sort_cmp(struct btree_iter_set l, +				struct btree_iter_set r) +{ +	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k)); + +	return c ? c > 0 : l.k < r.k; +} + +static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter, +					  struct bkey *tmp) +{ +	while (iter->used > 1) { +		struct btree_iter_set *top = iter->data, *i = top + 1; + +		if (iter->used > 2 && +		    bch_extent_sort_cmp(i[0], i[1])) +			i++; + +		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0) +			break; + +		if (!KEY_SIZE(i->k)) { +			sort_key_next(iter, i); +			heap_sift(iter, i - top, bch_extent_sort_cmp); +			continue; +		} + +		if (top->k > i->k) { +			if (bkey_cmp(top->k, i->k) >= 0) +				sort_key_next(iter, i); +			else +				bch_cut_front(top->k, i->k); + +			heap_sift(iter, i - top, bch_extent_sort_cmp); +		} else { +			/* can't happen because of comparison func */ +			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k))); + +			if (bkey_cmp(i->k, top->k) < 0) { +				bkey_copy(tmp, top->k); + +				bch_cut_back(&START_KEY(i->k), tmp); +				bch_cut_front(i->k, top->k); +				heap_sift(iter, 0, bch_extent_sort_cmp); + +				return tmp; +			} else { +				bch_cut_back(&START_KEY(i->k), top->k); +			} +		} +	} + +	return NULL; +} + +static void bch_subtract_dirty(struct bkey *k, +			   struct cache_set *c, +			   uint64_t offset, +			   int sectors) +{ +	if (KEY_DIRTY(k)) +		bcache_dev_sectors_dirty_add(c, KEY_INODE(k), +					     offset, -sectors); +} + +static bool bch_extent_insert_fixup(struct btree_keys *b, +				    struct bkey *insert, +				    struct btree_iter *iter, +				    struct bkey *replace_key) +{ +	struct cache_set *c = container_of(b, struct btree, keys)->c; + +	uint64_t old_offset; +	unsigned old_size, sectors_found = 0; + +	BUG_ON(!KEY_OFFSET(insert)); +	BUG_ON(!KEY_SIZE(insert)); + +	while (1) { +		struct bkey *k = bch_btree_iter_next(iter); +		if (!k) +			break; + +		if (bkey_cmp(&START_KEY(k), insert) >= 0) { +			if (KEY_SIZE(k)) +				break; +			else +				continue; +		} + +		if (bkey_cmp(k, &START_KEY(insert)) <= 0) +			continue; + +		old_offset = KEY_START(k); +		old_size = KEY_SIZE(k); + +		/* +		 * We might overlap with 0 size extents; we can't skip these +		 * because if they're in the set we're inserting to we have to +		 * adjust them so they don't overlap with the key we're +		 * inserting. But we don't want to check them for replace +		 * operations. 
+		 */ + +		if (replace_key && KEY_SIZE(k)) { +			/* +			 * k might have been split since we inserted/found the +			 * key we're replacing +			 */ +			unsigned i; +			uint64_t offset = KEY_START(k) - +				KEY_START(replace_key); + +			/* But it must be a subset of the replace key */ +			if (KEY_START(k) < KEY_START(replace_key) || +			    KEY_OFFSET(k) > KEY_OFFSET(replace_key)) +				goto check_failed; + +			/* We didn't find a key that we were supposed to */ +			if (KEY_START(k) > KEY_START(insert) + sectors_found) +				goto check_failed; + +			if (!bch_bkey_equal_header(k, replace_key)) +				goto check_failed; + +			/* skip past gen */ +			offset <<= 8; + +			BUG_ON(!KEY_PTRS(replace_key)); + +			for (i = 0; i < KEY_PTRS(replace_key); i++) +				if (k->ptr[i] != replace_key->ptr[i] + offset) +					goto check_failed; + +			sectors_found = KEY_OFFSET(k) - KEY_START(insert); +		} + +		if (bkey_cmp(insert, k) < 0 && +		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) { +			/* +			 * We overlapped in the middle of an existing key: that +			 * means we have to split the old key. But we have to do +			 * slightly different things depending on whether the +			 * old key has been written out yet. +			 */ + +			struct bkey *top; + +			bch_subtract_dirty(k, c, KEY_START(insert), +				       KEY_SIZE(insert)); + +			if (bkey_written(b, k)) { +				/* +				 * We insert a new key to cover the top of the +				 * old key, and the old key is modified in place +				 * to represent the bottom split. +				 * +				 * It's completely arbitrary whether the new key +				 * is the top or the bottom, but it has to match +				 * up with what btree_sort_fixup() does - it +				 * doesn't check for this kind of overlap, it +				 * depends on us inserting a new key for the top +				 * here. 
+				 */ +				top = bch_bset_search(b, bset_tree_last(b), +						      insert); +				bch_bset_insert(b, top, k); +			} else { +				BKEY_PADDED(key) temp; +				bkey_copy(&temp.key, k); +				bch_bset_insert(b, k, &temp.key); +				top = bkey_next(k); +			} + +			bch_cut_front(insert, top); +			bch_cut_back(&START_KEY(insert), k); +			bch_bset_fix_invalidated_key(b, k); +			goto out; +		} + +		if (bkey_cmp(insert, k) < 0) { +			bch_cut_front(insert, k); +		} else { +			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) +				old_offset = KEY_START(insert); + +			if (bkey_written(b, k) && +			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) { +				/* +				 * Completely overwrote, so we don't have to +				 * invalidate the binary search tree +				 */ +				bch_cut_front(k, k); +			} else { +				__bch_cut_back(&START_KEY(insert), k); +				bch_bset_fix_invalidated_key(b, k); +			} +		} + +		bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k)); +	} + +check_failed: +	if (replace_key) { +		if (!sectors_found) { +			return true; +		} else if (sectors_found < KEY_SIZE(insert)) { +			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) - +				       (KEY_SIZE(insert) - sectors_found)); +			SET_KEY_SIZE(insert, sectors_found); +		} +	} +out: +	if (KEY_DIRTY(insert)) +		bcache_dev_sectors_dirty_add(c, KEY_INODE(insert), +					     KEY_START(insert), +					     KEY_SIZE(insert)); + +	return false; +} + +static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k) +{ +	struct btree *b = container_of(bk, struct btree, keys); +	char buf[80]; + +	if (!KEY_SIZE(k)) +		return true; + +	if (KEY_SIZE(k) > KEY_OFFSET(k)) +		goto bad; + +	if (__ptr_invalid(b->c, k)) +		goto bad; + +	return false; +bad: +	bch_extent_to_text(buf, sizeof(buf), k); +	cache_bug(b->c, "spotted extent %s: %s", buf, bch_ptr_status(b->c, k)); +	return true; +} + +static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k, +				     unsigned ptr) +{ +	struct bucket *g = PTR_BUCKET(b->c, k, ptr); +	char buf[80]; + +	if (mutex_trylock(&b->c->bucket_lock)) { +		if (b->c->gc_mark_valid && +		    (!GC_MARK(g) || +		     GC_MARK(g) == GC_MARK_METADATA || +		     (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k)))) +			goto err; + +		if (g->prio == BTREE_PRIO) +			goto err; + +		mutex_unlock(&b->c->bucket_lock); +	} + +	return false; +err: +	mutex_unlock(&b->c->bucket_lock); +	bch_extent_to_text(buf, sizeof(buf), k); +	btree_bug(b, +"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu", +		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin), +		  g->prio, g->gen, g->last_gc, GC_MARK(g)); +	return true; +} + +static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k) +{ +	struct btree *b = container_of(bk, struct btree, keys); +	struct bucket *g; +	unsigned i, stale; + +	if (!KEY_PTRS(k) || +	    bch_extent_invalid(bk, k)) +		return true; + +	for (i = 0; i < KEY_PTRS(k); i++) +		if (!ptr_available(b->c, k, i)) +			return true; + +	if (!expensive_debug_checks(b->c) && KEY_DIRTY(k)) +		return false; + +	for (i = 0; i < KEY_PTRS(k); i++) { +		g = PTR_BUCKET(b->c, k, i); +		stale = ptr_stale(b->c, k, i); + +		btree_bug_on(stale > 96, b, +			     "key too stale: %i, need_gc %u", +			     stale, b->c->need_gc); + +		btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k), +			     b, "stale dirty pointer"); + +		if (stale) +			return true; + +		if (expensive_debug_checks(b->c) && +		    bch_extent_bad_expensive(b, k, i)) +			return true; +	} + +	return false; +} + +static uint64_t 
merge_chksums(struct bkey *l, struct bkey *r) +{ +	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) & +		~((uint64_t)1 << 63); +} + +static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r) +{ +	struct btree *b = container_of(bk, struct btree, keys); +	unsigned i; + +	if (key_merging_disabled(b->c)) +		return false; + +	for (i = 0; i < KEY_PTRS(l); i++) +		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] || +		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i)) +			return false; + +	/* Keys with no pointers aren't restricted to one bucket and could +	 * overflow KEY_SIZE +	 */ +	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) { +		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l)); +		SET_KEY_SIZE(l, USHRT_MAX); + +		bch_cut_front(l, r); +		return false; +	} + +	if (KEY_CSUM(l)) { +		if (KEY_CSUM(r)) +			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r); +		else +			SET_KEY_CSUM(l, 0); +	} + +	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r)); +	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r)); + +	return true; +} + +const struct btree_keys_ops bch_extent_keys_ops = { +	.sort_cmp	= bch_extent_sort_cmp, +	.sort_fixup	= bch_extent_sort_fixup, +	.insert_fixup	= bch_extent_insert_fixup, +	.key_invalid	= bch_extent_invalid, +	.key_bad	= bch_extent_bad, +	.key_merge	= bch_extent_merge, +	.key_to_text	= bch_extent_to_text, +	.key_dump	= bch_bkey_dump, +	.is_extents	= true, +}; diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h new file mode 100644 index 00000000000..e4e23409782 --- /dev/null +++ b/drivers/md/bcache/extents.h @@ -0,0 +1,13 @@ +#ifndef _BCACHE_EXTENTS_H +#define _BCACHE_EXTENTS_H + +extern const struct btree_keys_ops bch_btree_keys_ops; +extern const struct btree_keys_ops bch_extent_keys_ops; + +struct bkey; +struct cache_set; + +void bch_extent_to_text(char *, size_t, const struct bkey *); +bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *); + +#endif /* _BCACHE_EXTENTS_H */ diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 9056632995b..fa028fa82df 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -11,178 +11,40 @@  #include <linux/blkdev.h> -static void bch_bi_idx_hack_endio(struct bio *bio, int error) -{ -	struct bio *p = bio->bi_private; - -	bio_endio(p, error); -	bio_put(bio); -} - -static void bch_generic_make_request_hack(struct bio *bio) -{ -	if (bio->bi_idx) { -		struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio)); - -		memcpy(clone->bi_io_vec, -		       bio_iovec(bio), -		       bio_segments(bio) * sizeof(struct bio_vec)); - -		clone->bi_sector	= bio->bi_sector; -		clone->bi_bdev		= bio->bi_bdev; -		clone->bi_rw		= bio->bi_rw; -		clone->bi_vcnt		= bio_segments(bio); -		clone->bi_size		= bio->bi_size; - -		clone->bi_private	= bio; -		clone->bi_end_io	= bch_bi_idx_hack_endio; - -		bio = clone; -	} - -	/* -	 * Hack, since drivers that clone bios clone up to bi_max_vecs, but our -	 * bios might have had more than that (before we split them per device -	 * limitations). -	 * -	 * To be taken out once immutable bvec stuff is in. -	 */ -	bio->bi_max_vecs = bio->bi_vcnt; - -	generic_make_request(bio); -} - -/** - * bch_bio_split - split a bio - * @bio:	bio to split - * @sectors:	number of sectors to split from the front of @bio - * @gfp:	gfp mask - * @bs:		bio set to allocate from - * - * Allocates and returns a new bio which represents @sectors from the start of - * @bio, and updates @bio to represent the remaining sectors. 
- * - * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio - * unchanged. - * - * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a - * bvec boundry; it is the caller's responsibility to ensure that @bio is not - * freed before the split. - */ -struct bio *bch_bio_split(struct bio *bio, int sectors, -			  gfp_t gfp, struct bio_set *bs) -{ -	unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9; -	struct bio_vec *bv; -	struct bio *ret = NULL; - -	BUG_ON(sectors <= 0); - -	if (sectors >= bio_sectors(bio)) -		return bio; - -	if (bio->bi_rw & REQ_DISCARD) { -		ret = bio_alloc_bioset(gfp, 1, bs); -		if (!ret) -			return NULL; -		idx = 0; -		goto out; -	} - -	bio_for_each_segment(bv, bio, idx) { -		vcnt = idx - bio->bi_idx; - -		if (!nbytes) { -			ret = bio_alloc_bioset(gfp, vcnt, bs); -			if (!ret) -				return NULL; - -			memcpy(ret->bi_io_vec, bio_iovec(bio), -			       sizeof(struct bio_vec) * vcnt); - -			break; -		} else if (nbytes < bv->bv_len) { -			ret = bio_alloc_bioset(gfp, ++vcnt, bs); -			if (!ret) -				return NULL; - -			memcpy(ret->bi_io_vec, bio_iovec(bio), -			       sizeof(struct bio_vec) * vcnt); - -			ret->bi_io_vec[vcnt - 1].bv_len = nbytes; -			bv->bv_offset	+= nbytes; -			bv->bv_len	-= nbytes; -			break; -		} - -		nbytes -= bv->bv_len; -	} -out: -	ret->bi_bdev	= bio->bi_bdev; -	ret->bi_sector	= bio->bi_sector; -	ret->bi_size	= sectors << 9; -	ret->bi_rw	= bio->bi_rw; -	ret->bi_vcnt	= vcnt; -	ret->bi_max_vecs = vcnt; - -	bio->bi_sector	+= sectors; -	bio->bi_size	-= sectors << 9; -	bio->bi_idx	 = idx; - -	if (bio_integrity(bio)) { -		if (bio_integrity_clone(ret, bio, gfp)) { -			bio_put(ret); -			return NULL; -		} - -		bio_integrity_trim(ret, 0, bio_sectors(ret)); -		bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio)); -	} - -	return ret; -} -  static unsigned bch_bio_max_sectors(struct bio *bio)  { -	unsigned ret = bio_sectors(bio);  	struct request_queue *q = bdev_get_queue(bio->bi_bdev); -	unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES, -				      queue_max_segments(q)); +	struct bio_vec bv; +	struct bvec_iter iter; +	unsigned ret = 0, seg = 0;  	if (bio->bi_rw & REQ_DISCARD) -		return min(ret, q->limits.max_discard_sectors); - -	if (bio_segments(bio) > max_segments || -	    q->merge_bvec_fn) { -		struct bio_vec *bv; -		int i, seg = 0; - -		ret = 0; - -		bio_for_each_segment(bv, bio, i) { -			struct bvec_merge_data bvm = { -				.bi_bdev	= bio->bi_bdev, -				.bi_sector	= bio->bi_sector, -				.bi_size	= ret << 9, -				.bi_rw		= bio->bi_rw, -			}; - -			if (seg == max_segments) -				break; +		return min(bio_sectors(bio), q->limits.max_discard_sectors); + +	bio_for_each_segment(bv, bio, iter) { +		struct bvec_merge_data bvm = { +			.bi_bdev	= bio->bi_bdev, +			.bi_sector	= bio->bi_iter.bi_sector, +			.bi_size	= ret << 9, +			.bi_rw		= bio->bi_rw, +		}; + +		if (seg == min_t(unsigned, BIO_MAX_PAGES, +				 queue_max_segments(q))) +			break; -			if (q->merge_bvec_fn && -			    q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len) -				break; +		if (q->merge_bvec_fn && +		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len) +			break; -			seg++; -			ret += bv->bv_len >> 9; -		} +		seg++; +		ret += bv.bv_len >> 9;  	}  	ret = min(ret, queue_max_sectors(q));  	WARN_ON(!ret); -	ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9); +	ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);  	return ret;  } @@ -193,7 +55,7 @@ static void bch_bio_submit_split_done(struct closure *cl)  	s->bio->bi_end_io = s->bi_end_io;  	
s->bio->bi_private = s->bi_private; -	bio_endio(s->bio, 0); +	bio_endio_nodec(s->bio, 0);  	closure_debug_destroy(&s->cl);  	mempool_free(s, s->p->bio_split_hook); @@ -232,19 +94,19 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)  	bio_get(bio);  	do { -		n = bch_bio_split(bio, bch_bio_max_sectors(bio), -				  GFP_NOIO, s->p->bio_split); +		n = bio_next_split(bio, bch_bio_max_sectors(bio), +				   GFP_NOIO, s->p->bio_split);  		n->bi_end_io	= bch_bio_submit_split_endio;  		n->bi_private	= &s->cl;  		closure_get(&s->cl); -		bch_generic_make_request_hack(n); +		generic_make_request(n);  	} while (n != bio);  	continue_at(&s->cl, bch_bio_submit_split_done, NULL);  submit: -	bch_generic_make_request_hack(bio); +	generic_make_request(bio);  }  /* Bios with headers */ @@ -272,8 +134,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)  {  	struct bbio *b = container_of(bio, struct bbio, bio); -	bio->bi_sector	= PTR_OFFSET(&b->key, 0); -	bio->bi_bdev	= PTR_CACHE(c, &b->key, 0)->bdev; +	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0); +	bio->bi_bdev		= PTR_CACHE(c, &b->key, 0)->bdev;  	b->submit_time_us = local_clock_us();  	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0)); diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index ba95ab84b2b..59e82021b5b 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -7,7 +7,6 @@  #include "bcache.h"  #include "btree.h"  #include "debug.h" -#include "request.h"  #include <trace/events/bcache.h> @@ -31,35 +30,38 @@ static void journal_read_endio(struct bio *bio, int error)  }  static int journal_read_bucket(struct cache *ca, struct list_head *list, -			       struct btree_op *op, unsigned bucket_index) +			       unsigned bucket_index)  {  	struct journal_device *ja = &ca->journal;  	struct bio *bio = &ja->bio;  	struct journal_replay *i;  	struct jset *j, *data = ca->set->journal.w[0].data; +	struct closure cl;  	unsigned len, left, offset = 0;  	int ret = 0;  	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]); -	pr_debug("reading %llu", (uint64_t) bucket); +	closure_init_stack(&cl); + +	pr_debug("reading %u", bucket_index);  	while (offset < ca->sb.bucket_size) {  reread:		left = ca->sb.bucket_size - offset; -		len = min_t(unsigned, left, PAGE_SECTORS * 8); +		len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);  		bio_reset(bio); -		bio->bi_sector	= bucket + offset; +		bio->bi_iter.bi_sector	= bucket + offset;  		bio->bi_bdev	= ca->bdev;  		bio->bi_rw	= READ; -		bio->bi_size	= len << 9; +		bio->bi_iter.bi_size	= len << 9;  		bio->bi_end_io	= journal_read_endio; -		bio->bi_private = &op->cl; +		bio->bi_private = &cl;  		bch_bio_map(bio, data); -		closure_bio_submit(bio, &op->cl, ca); -		closure_sync(&op->cl); +		closure_bio_submit(bio, &cl, ca); +		closure_sync(&cl);  		/* This function could be simpler now since we no longer write  		 * journal entries that overlap bucket boundaries; this means @@ -72,19 +74,28 @@ reread:		left = ca->sb.bucket_size - offset;  			struct list_head *where;  			size_t blocks, bytes = set_bytes(j); -			if (j->magic != jset_magic(ca->set)) +			if (j->magic != jset_magic(&ca->sb)) { +				pr_debug("%u: bad magic", bucket_index);  				return ret; +			} -			if (bytes > left << 9) +			if (bytes > left << 9 || +			    bytes > PAGE_SIZE << JSET_BITS) { +				pr_info("%u: too big, %zu bytes, offset %u", +					bucket_index, bytes, offset);  				return ret; +			}  			if (bytes > len << 9)  				goto reread; -			if 
(j->csum != csum_set(j)) +			if (j->csum != csum_set(j)) { +				pr_info("%u: bad csum, %zu bytes, offset %u", +					bucket_index, bytes, offset);  				return ret; +			} -			blocks = set_blocks(j, ca->set); +			blocks = set_blocks(j, block_bytes(ca->set));  			while (!list_empty(list)) {  				i = list_first_entry(list, @@ -129,12 +140,11 @@ next_set:  	return ret;  } -int bch_journal_read(struct cache_set *c, struct list_head *list, -			struct btree_op *op) +int bch_journal_read(struct cache_set *c, struct list_head *list)  {  #define read_bucket(b)							\  	({								\ -		int ret = journal_read_bucket(ca, list, op, b);		\ +		int ret = journal_read_bucket(ca, list, b);		\  		__set_bit(b, bitmap);					\  		if (ret < 0)						\  			return ret;					\ @@ -153,7 +163,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,  		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);  		pr_debug("%u journal buckets", ca->sb.njournal_buckets); -		/* Read journal buckets ordered by golden ratio hash to quickly +		/* +		 * Read journal buckets ordered by golden ratio hash to quickly  		 * find a sequence of buckets with valid journal entries  		 */  		for (i = 0; i < ca->sb.njournal_buckets; i++) { @@ -166,18 +177,20 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,  				goto bsearch;  		} -		/* If that fails, check all the buckets we haven't checked +		/* +		 * If that fails, check all the buckets we haven't checked  		 * already  		 */  		pr_debug("falling back to linear search"); -		for (l = 0; l < ca->sb.njournal_buckets; l++) { -			if (test_bit(l, bitmap)) -				continue; - +		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets); +		     l < ca->sb.njournal_buckets; +		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))  			if (read_bucket(l))  				goto bsearch; -		} + +		if (list_empty(list)) +			continue;  bsearch:  		/* Binary search */  		m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); @@ -197,10 +210,12 @@ bsearch:  				r = m;  		} -		/* Read buckets in reverse order until we stop finding more +		/* +		 * Read buckets in reverse order until we stop finding more  		 * journal entries  		 */ -		pr_debug("finishing up"); +		pr_debug("finishing up: m %u njournal_buckets %u", +			 m, ca->sb.njournal_buckets);  		l = m;  		while (1) { @@ -222,15 +237,22 @@ bsearch:  		for (i = 0; i < ca->sb.njournal_buckets; i++)  			if (ja->seq[i] > seq) {  				seq = ja->seq[i]; -				ja->cur_idx = ja->discard_idx = -					ja->last_idx = i; +				/* +				 * When journal_reclaim() goes to allocate for +				 * the first time, it'll use the bucket after +				 * ja->cur_idx +				 */ +				ja->cur_idx = i; +				ja->last_idx = ja->discard_idx = (i + 1) % +					ca->sb.njournal_buckets;  			}  	} -	c->journal.seq = list_entry(list->prev, -				    struct journal_replay, -				    list)->j.seq; +	if (!list_empty(list)) +		c->journal.seq = list_entry(list->prev, +					    struct journal_replay, +					    list)->j.seq;  	return 0;  #undef read_bucket @@ -268,26 +290,20 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)  		}  		for (k = i->j.start; -		     k < end(&i->j); +		     k < bset_bkey_last(&i->j);  		     k = bkey_next(k)) {  			unsigned j; -			for (j = 0; j < KEY_PTRS(k); j++) { -				struct bucket *g = PTR_BUCKET(c, k, j); -				atomic_inc(&g->pin); +			for (j = 0; j < KEY_PTRS(k); j++) +				if (ptr_available(c, k, j)) +					atomic_inc(&PTR_BUCKET(c, k, j)->pin); -				if (g->prio == BTREE_PRIO && -				    !ptr_stale(c, k, j)) -					g->prio = 
INITIAL_PRIO; -			} - -			__bch_btree_mark_key(c, 0, k); +			bch_initial_mark_key(c, 0, k);  		}  	}  } -int bch_journal_replay(struct cache_set *s, struct list_head *list, -			  struct btree_op *op) +int bch_journal_replay(struct cache_set *s, struct list_head *list)  {  	int ret = 0, keys = 0, entries = 0;  	struct bkey *k; @@ -295,31 +311,27 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,  		list_entry(list->prev, struct journal_replay, list);  	uint64_t start = i->j.last_seq, end = i->j.seq, n = start; +	struct keylist keylist;  	list_for_each_entry(i, list, list) {  		BUG_ON(i->pin && atomic_read(i->pin) != 1); -		if (n != i->j.seq) -			pr_err( -		"journal entries %llu-%llu missing! (replaying %llu-%llu)\n", -		n, i->j.seq - 1, start, end); +		cache_set_err_on(n != i->j.seq, s, +"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)", +				 n, i->j.seq - 1, start, end);  		for (k = i->j.start; -		     k < end(&i->j); +		     k < bset_bkey_last(&i->j);  		     k = bkey_next(k)) {  			trace_bcache_journal_replay_key(k); -			bkey_copy(op->keys.top, k); -			bch_keylist_push(&op->keys); - -			op->journal = i->pin; -			atomic_inc(op->journal); +			bch_keylist_init_single(&keylist, k); -			ret = bch_btree_insert(op, s); +			ret = bch_btree_insert(s, &keylist, i->pin, NULL);  			if (ret)  				goto err; -			BUG_ON(!bch_keylist_empty(&op->keys)); +			BUG_ON(!bch_keylist_empty(&keylist));  			keys++;  			cond_resched(); @@ -333,14 +345,13 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,  	pr_info("journal replay done, %i keys in %i entries, seq %llu",  		keys, entries, end); - +err:  	while (!list_empty(list)) {  		i = list_first_entry(list, struct journal_replay, list);  		list_del(&i->list);  		kfree(i);  	} -err: -	closure_sync(&op->cl); +  	return ret;  } @@ -352,48 +363,34 @@ static void btree_flush_write(struct cache_set *c)  	 * Try to find the btree node with that references the oldest journal  	 * entry, best is our current candidate and is locked if non NULL:  	 */ -	struct btree *b, *best = NULL; -	unsigned iter; - -	for_each_cached_btree(b, c, iter) { -		if (!down_write_trylock(&b->lock)) -			continue; - -		if (!btree_node_dirty(b) || -		    !btree_current_write(b)->journal) { -			rw_unlock(true, b); -			continue; +	struct btree *b, *best; +	unsigned i; +retry: +	best = NULL; + +	for_each_cached_btree(b, c, i) +		if (btree_current_write(b)->journal) { +			if (!best) +				best = b; +			else if (journal_pin_cmp(c, +					btree_current_write(best)->journal, +					btree_current_write(b)->journal)) { +				best = b; +			}  		} -		if (!best) -			best = b; -		else if (journal_pin_cmp(c, -					 btree_current_write(best), -					 btree_current_write(b))) { -			rw_unlock(true, best); -			best = b; -		} else -			rw_unlock(true, b); -	} - -	if (best) -		goto out; - -	/* We can't find the best btree node, just pick the first */ -	list_for_each_entry(b, &c->btree_cache, list) -		if (!b->level && btree_node_dirty(b)) { -			best = b; -			rw_lock(true, best, best->level); -			goto found; +	b = best; +	if (b) { +		mutex_lock(&b->write_lock); +		if (!btree_current_write(b)->journal) { +			mutex_unlock(&b->write_lock); +			/* We raced */ +			goto retry;  		} -out: -	if (!best) -		return; -found: -	if (btree_node_dirty(best)) -		bch_btree_node_write(best, NULL); -	rw_unlock(true, best); +		__bch_btree_node_write(b, NULL); +		mutex_unlock(&b->write_lock); +	}  }  #define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1) @@ -428,7 +425,7 @@ static void 
do_journal_discard(struct cache *ca)  		return;  	} -	switch (atomic_read(&ja->discard_in_flight) == DISCARD_IN_FLIGHT) { +	switch (atomic_read(&ja->discard_in_flight)) {  	case DISCARD_IN_FLIGHT:  		return; @@ -446,13 +443,13 @@ static void do_journal_discard(struct cache *ca)  		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);  		bio_init(bio); -		bio->bi_sector		= bucket_to_sector(ca->set, +		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,  						ca->sb.d[ja->discard_idx]);  		bio->bi_bdev		= ca->bdev;  		bio->bi_rw		= REQ_WRITE|REQ_DISCARD;  		bio->bi_max_vecs	= 1;  		bio->bi_io_vec		= bio->bi_inline_vecs; -		bio->bi_size		= bucket_bytes(ca); +		bio->bi_iter.bi_size	= bucket_bytes(ca);  		bio->bi_end_io		= journal_discard_endio;  		closure_get(&ca->set->cl); @@ -489,7 +486,7 @@ static void journal_reclaim(struct cache_set *c)  		do_journal_discard(ca);  	if (c->journal.blocks_free) -		return; +		goto out;  	/*  	 * Allocate: @@ -515,7 +512,7 @@ static void journal_reclaim(struct cache_set *c)  	if (n)  		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits; - +out:  	if (!journal_full(&c->journal))  		__closure_wake_up(&c->journal.wait);  } @@ -536,6 +533,7 @@ void bch_journal_next(struct journal *j)  	atomic_set(&fifo_back(&j->pin), 1);  	j->cur->data->seq	= ++j->seq; +	j->cur->dirty		= false;  	j->cur->need_write	= false;  	j->cur->data->keys	= 0; @@ -548,51 +546,46 @@ static void journal_write_endio(struct bio *bio, int error)  	struct journal_write *w = bio->bi_private;  	cache_set_err_on(error, w->c, "journal io error"); -	closure_put(&w->c->journal.io.cl); +	closure_put(&w->c->journal.io);  }  static void journal_write(struct closure *);  static void journal_write_done(struct closure *cl)  { -	struct journal *j = container_of(cl, struct journal, io.cl); -	struct cache_set *c = container_of(j, struct cache_set, journal); - +	struct journal *j = container_of(cl, struct journal, io);  	struct journal_write *w = (j->cur == j->w)  		? &j->w[1]  		: &j->w[0];  	__closure_wake_up(&w->wait); +	continue_at_nobarrier(cl, journal_write, system_wq); +} -	if (c->journal_delay_ms) -		closure_delay(&j->io, msecs_to_jiffies(c->journal_delay_ms)); +static void journal_write_unlock(struct closure *cl) +{ +	struct cache_set *c = container_of(cl, struct cache_set, journal.io); -	continue_at(cl, journal_write, system_wq); +	c->journal.io_in_flight = 0; +	spin_unlock(&c->journal.lock);  }  static void journal_write_unlocked(struct closure *cl)  	__releases(c->journal.lock)  { -	struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl); +	struct cache_set *c = container_of(cl, struct cache_set, journal.io);  	struct cache *ca;  	struct journal_write *w = c->journal.cur;  	struct bkey *k = &c->journal.key; -	unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size; +	unsigned i, sectors = set_blocks(w->data, block_bytes(c)) * +		c->sb.block_size;  	struct bio *bio;  	struct bio_list list;  	bio_list_init(&list);  	if (!w->need_write) { -		/* -		 * XXX: have to unlock closure before we unlock journal lock, -		 * else we race with bch_journal(). But this way we race -		 * against cache set unregister. Doh. 
-		 */ -		set_closure_fn(cl, NULL, NULL); -		closure_sub(cl, CLOSURE_RUNNING + 1); -		spin_unlock(&c->journal.lock); -		return; +		closure_return_with_destructor(cl, journal_write_unlock);  	} else if (journal_full(&c->journal)) {  		journal_reclaim(c);  		spin_unlock(&c->journal.lock); @@ -601,7 +594,7 @@ static void journal_write_unlocked(struct closure *cl)  		continue_at(cl, journal_write, system_wq);  	} -	c->journal.blocks_free -= set_blocks(w->data, c); +	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));  	w->data->btree_level = c->root->level; @@ -611,7 +604,7 @@ static void journal_write_unlocked(struct closure *cl)  	for_each_cache(ca, c, i)  		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0]; -	w->data->magic		= jset_magic(c); +	w->data->magic		= jset_magic(&c->sb);  	w->data->version	= BCACHE_JSET_VERSION;  	w->data->last_seq	= last_seq(&c->journal);  	w->data->csum		= csum_set(w->data); @@ -623,10 +616,10 @@ static void journal_write_unlocked(struct closure *cl)  		atomic_long_add(sectors, &ca->meta_sectors_written);  		bio_reset(bio); -		bio->bi_sector	= PTR_OFFSET(k, i); +		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);  		bio->bi_bdev	= ca->bdev;  		bio->bi_rw	= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA; -		bio->bi_size	= sectors << 9; +		bio->bi_iter.bi_size = sectors << 9;  		bio->bi_end_io	= journal_write_endio;  		bio->bi_private = w; @@ -654,120 +647,144 @@ static void journal_write_unlocked(struct closure *cl)  static void journal_write(struct closure *cl)  { -	struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl); +	struct cache_set *c = container_of(cl, struct cache_set, journal.io);  	spin_lock(&c->journal.lock);  	journal_write_unlocked(cl);  } -static void __journal_try_write(struct cache_set *c, bool noflush) +static void journal_try_write(struct cache_set *c)  	__releases(c->journal.lock)  { -	struct closure *cl = &c->journal.io.cl; +	struct closure *cl = &c->journal.io; +	struct journal_write *w = c->journal.cur; -	if (!closure_trylock(cl, &c->cl)) -		spin_unlock(&c->journal.lock); -	else if (noflush && journal_full(&c->journal)) { +	w->need_write = true; + +	if (!c->journal.io_in_flight) { +		c->journal.io_in_flight = 1; +		closure_call(cl, journal_write_unlocked, NULL, &c->cl); +	} else {  		spin_unlock(&c->journal.lock); -		continue_at(cl, journal_write, system_wq); -	} else -		journal_write_unlocked(cl); +	}  } -#define journal_try_write(c)	__journal_try_write(c, false) - -void bch_journal_meta(struct cache_set *c, struct closure *cl) +static struct journal_write *journal_wait_for_write(struct cache_set *c, +						    unsigned nkeys)  { -	struct journal_write *w; +	size_t sectors; +	struct closure cl; +	bool wait = false; -	if (CACHE_SYNC(&c->sb)) { -		spin_lock(&c->journal.lock); +	closure_init_stack(&cl); + +	spin_lock(&c->journal.lock); + +	while (1) { +		struct journal_write *w = c->journal.cur; + +		sectors = __set_blocks(w->data, w->data->keys + nkeys, +				       block_bytes(c)) * c->sb.block_size; + +		if (sectors <= min_t(size_t, +				     c->journal.blocks_free * c->sb.block_size, +				     PAGE_SECTORS << JSET_BITS)) +			return w; + +		if (wait) +			closure_wait(&c->journal.wait, &cl); + +		if (!journal_full(&c->journal)) { +			if (wait) +				trace_bcache_journal_entry_full(c); -		w = c->journal.cur; -		w->need_write = true; +			/* +			 * XXX: If we were inserting so many keys that they +			 * won't fit in an _empty_ journal write, we'll +			 * deadlock. 
For now, handle this in +			 * bch_keylist_realloc() - but something to think about. +			 */ +			BUG_ON(!w->data->keys); -		if (cl) -			BUG_ON(!closure_wait(&w->wait, cl)); +			journal_try_write(c); /* unlocks */ +		} else { +			if (wait) +				trace_bcache_journal_full(c); -		__journal_try_write(c, true); +			journal_reclaim(c); +			spin_unlock(&c->journal.lock); + +			btree_flush_write(c); +		} + +		closure_sync(&cl); +		spin_lock(&c->journal.lock); +		wait = true;  	}  } +static void journal_write_work(struct work_struct *work) +{ +	struct cache_set *c = container_of(to_delayed_work(work), +					   struct cache_set, +					   journal.work); +	spin_lock(&c->journal.lock); +	if (c->journal.cur->dirty) +		journal_try_write(c); +	else +		spin_unlock(&c->journal.lock); +} +  /*   * Entry point to the journalling code - bio_insert() and btree_invalidate()   * pass bch_journal() a list of keys to be journalled, and then   * bch_journal() hands those same keys off to btree_insert_async()   */ -void bch_journal(struct closure *cl) +atomic_t *bch_journal(struct cache_set *c, +		      struct keylist *keys, +		      struct closure *parent)  { -	struct btree_op *op = container_of(cl, struct btree_op, cl); -	struct cache_set *c = op->c;  	struct journal_write *w; -	size_t b, n = ((uint64_t *) op->keys.top) - op->keys.list; +	atomic_t *ret; -	if (op->type != BTREE_INSERT || -	    !CACHE_SYNC(&c->sb)) -		goto out; +	if (!CACHE_SYNC(&c->sb)) +		return NULL; -	/* -	 * If we're looping because we errored, might already be waiting on -	 * another journal write: -	 */ -	while (atomic_read(&cl->parent->remaining) & CLOSURE_WAITING) -		closure_sync(cl->parent); - -	spin_lock(&c->journal.lock); +	w = journal_wait_for_write(c, bch_keylist_nkeys(keys)); -	if (journal_full(&c->journal)) { -		trace_bcache_journal_full(c); +	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys)); +	w->data->keys += bch_keylist_nkeys(keys); -		closure_wait(&c->journal.wait, cl); +	ret = &fifo_back(&c->journal.pin); +	atomic_inc(ret); -		journal_reclaim(c); +	if (parent) { +		closure_wait(&w->wait, parent); +		journal_try_write(c); +	} else if (!w->dirty) { +		w->dirty = true; +		schedule_delayed_work(&c->journal.work, +				      msecs_to_jiffies(c->journal_delay_ms)); +		spin_unlock(&c->journal.lock); +	} else {  		spin_unlock(&c->journal.lock); - -		btree_flush_write(c); -		continue_at(cl, bch_journal, bcache_wq);  	} -	w = c->journal.cur; -	w->need_write = true; -	b = __set_blocks(w->data, w->data->keys + n, c); - -	if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS || -	    b > c->journal.blocks_free) { -		trace_bcache_journal_entry_full(c); - -		/* -		 * XXX: If we were inserting so many keys that they won't fit in -		 * an _empty_ journal write, we'll deadlock. For now, handle -		 * this in bch_keylist_realloc() - but something to think about. 
-		 */ -		BUG_ON(!w->data->keys); - -		BUG_ON(!closure_wait(&w->wait, cl)); - -		closure_flush(&c->journal.io); - -		journal_try_write(c); -		continue_at(cl, bch_journal, bcache_wq); -	} -	memcpy(end(w->data), op->keys.list, n * sizeof(uint64_t)); -	w->data->keys += n; +	return ret; +} -	op->journal = &fifo_back(&c->journal.pin); -	atomic_inc(op->journal); +void bch_journal_meta(struct cache_set *c, struct closure *cl) +{ +	struct keylist keys; +	atomic_t *ref; -	if (op->flush_journal) { -		closure_flush(&c->journal.io); -		closure_wait(&w->wait, cl->parent); -	} +	bch_keylist_init(&keys); -	journal_try_write(c); -out: -	bch_btree_insert_async(cl); +	ref = bch_journal(c, &keys, cl); +	if (ref) +		atomic_dec_bug(ref);  }  void bch_journal_free(struct cache_set *c) @@ -781,8 +798,8 @@ int bch_journal_alloc(struct cache_set *c)  {  	struct journal *j = &c->journal; -	closure_init_unlocked(&j->io);  	spin_lock_init(&j->lock); +	INIT_DELAYED_WORK(&j->work, journal_write_work);  	c->journal_delay_ms = 100; diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h index 3d7851274b0..e3c39457afb 100644 --- a/drivers/md/bcache/journal.h +++ b/drivers/md/bcache/journal.h @@ -75,43 +75,6 @@   * nodes that are pinning the oldest journal entries first.   */ -#define BCACHE_JSET_VERSION_UUIDv1	1 -/* Always latest UUID format */ -#define BCACHE_JSET_VERSION_UUID	1 -#define BCACHE_JSET_VERSION		1 - -/* - * On disk format for a journal entry: - * seq is monotonically increasing; every journal entry has its own unique - * sequence number. - * - * last_seq is the oldest journal entry that still has keys the btree hasn't - * flushed to disk yet. - * - * version is for on disk format changes. - */ -struct jset { -	uint64_t		csum; -	uint64_t		magic; -	uint64_t		seq; -	uint32_t		version; -	uint32_t		keys; - -	uint64_t		last_seq; - -	BKEY_PADDED(uuid_bucket); -	BKEY_PADDED(btree_root); -	uint16_t		btree_level; -	uint16_t		pad[3]; - -	uint64_t		prio_bucket[MAX_CACHES_PER_SET]; - -	union { -		struct bkey	start[0]; -		uint64_t	d[0]; -	}; -}; -  /*   * Only used for holding the journal entries we read in btree_journal_read()   * during cache_registration @@ -132,6 +95,7 @@ struct journal_write {  	struct cache_set	*c;  	struct closure_waitlist	wait; +	bool			dirty;  	bool			need_write;  }; @@ -140,7 +104,9 @@ struct journal {  	spinlock_t		lock;  	/* used when waiting because the journal was full */  	struct closure_waitlist	wait; -	struct closure_with_timer io; +	struct closure		io; +	int			io_in_flight; +	struct delayed_work	work;  	/* Number of blocks free in the bucket(s) we're currently writing to */  	unsigned		blocks_free; @@ -188,8 +154,7 @@ struct journal_device {  };  #define journal_pin_cmp(c, l, r)				\ -	(fifo_idx(&(c)->journal.pin, (l)->journal) >		\ -	 fifo_idx(&(c)->journal.pin, (r)->journal)) +	(fifo_idx(&(c)->journal.pin, (l)) > fifo_idx(&(c)->journal.pin, (r)))  #define JOURNAL_PIN	20000 @@ -199,15 +164,14 @@ struct journal_device {  struct closure;  struct cache_set;  struct btree_op; +struct keylist; -void bch_journal(struct closure *); +atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *);  void bch_journal_next(struct journal *);  void bch_journal_mark(struct cache_set *, struct list_head *);  void bch_journal_meta(struct cache_set *, struct closure *); -int bch_journal_read(struct cache_set *, struct list_head *, -			struct btree_op *); -int bch_journal_replay(struct cache_set *, struct list_head *, -			  struct btree_op *); +int bch_journal_read(struct 
cache_set *, struct list_head *); +int bch_journal_replay(struct cache_set *, struct list_head *);  void bch_journal_free(struct cache_set *);  int bch_journal_alloc(struct cache_set *); diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 1a3b4f4786c..cd7490311e5 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -12,8 +12,9 @@  #include <trace/events/bcache.h>  struct moving_io { +	struct closure		cl;  	struct keybuf_key	*w; -	struct search		s; +	struct data_insert_op	op;  	struct bbio		bio;  }; @@ -23,13 +24,10 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)  					   moving_gc_keys);  	unsigned i; -	for (i = 0; i < KEY_PTRS(k); i++) { -		struct cache *ca = PTR_CACHE(c, k, i); -		struct bucket *g = PTR_BUCKET(c, k, i); - -		if (GC_SECTORS_USED(g) < ca->gc_move_threshold) +	for (i = 0; i < KEY_PTRS(k); i++) +		if (ptr_available(c, k, i) && +		    GC_MOVE(PTR_BUCKET(c, k, i)))  			return true; -	}  	return false;  } @@ -38,13 +36,13 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)  static void moving_io_destructor(struct closure *cl)  { -	struct moving_io *io = container_of(cl, struct moving_io, s.cl); +	struct moving_io *io = container_of(cl, struct moving_io, cl);  	kfree(io);  }  static void write_moving_finish(struct closure *cl)  { -	struct moving_io *io = container_of(cl, struct moving_io, s.cl); +	struct moving_io *io = container_of(cl, struct moving_io, cl);  	struct bio *bio = &io->bio.bio;  	struct bio_vec *bv;  	int i; @@ -52,26 +50,30 @@ static void write_moving_finish(struct closure *cl)  	bio_for_each_segment_all(bv, bio, i)  		__free_page(bv->bv_page); -	if (io->s.op.insert_collision) +	if (io->op.replace_collision)  		trace_bcache_gc_copy_collision(&io->w->key); -	bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w); +	bch_keybuf_del(&io->op.c->moving_gc_keys, io->w); -	atomic_dec_bug(&io->s.op.c->in_flight); -	closure_wake_up(&io->s.op.c->moving_gc_wait); +	up(&io->op.c->moving_in_flight);  	closure_return_with_destructor(cl, moving_io_destructor);  }  static void read_moving_endio(struct bio *bio, int error)  { +	struct bbio *b = container_of(bio, struct bbio, bio);  	struct moving_io *io = container_of(bio->bi_private, -					    struct moving_io, s.cl); +					    struct moving_io, cl);  	if (error) -		io->s.error = error; +		io->op.error = error; +	else if (!KEY_DIRTY(&b->key) && +		 ptr_stale(io->op.c, &b->key, 0)) { +		io->op.error = -EINTR; +	} -	bch_bbio_endio(io->s.op.c, bio, error, "reading data to move"); +	bch_bbio_endio(io->op.c, bio, error, "reading data to move");  }  static void moving_init(struct moving_io *io) @@ -82,57 +84,56 @@ static void moving_init(struct moving_io *io)  	bio_get(bio);  	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); -	bio->bi_size		= KEY_SIZE(&io->w->key) << 9; +	bio->bi_iter.bi_size	= KEY_SIZE(&io->w->key) << 9;  	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&io->w->key),  					       PAGE_SECTORS); -	bio->bi_private		= &io->s.cl; +	bio->bi_private		= &io->cl;  	bio->bi_io_vec		= bio->bi_inline_vecs;  	bch_bio_map(bio, NULL);  }  static void write_moving(struct closure *cl)  { -	struct search *s = container_of(cl, struct search, cl); -	struct moving_io *io = container_of(s, struct moving_io, s); +	struct moving_io *io = container_of(cl, struct moving_io, cl); +	struct data_insert_op *op = &io->op; -	if (!s->error) { +	if (!op->error) {  		moving_init(io); -		io->bio.bio.bi_sector	= KEY_START(&io->w->key); -		s->op.lock		= -1; -		
s->op.write_prio	= 1; -		s->op.cache_bio		= &io->bio.bio; +		io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key); +		op->write_prio		= 1; +		op->bio			= &io->bio.bio; -		s->writeback		= KEY_DIRTY(&io->w->key); -		s->op.csum		= KEY_CSUM(&io->w->key); +		op->writeback		= KEY_DIRTY(&io->w->key); +		op->csum		= KEY_CSUM(&io->w->key); -		s->op.type = BTREE_REPLACE; -		bkey_copy(&s->op.replace, &io->w->key); +		bkey_copy(&op->replace_key, &io->w->key); +		op->replace		= true; -		closure_init(&s->op.cl, cl); -		bch_insert_data(&s->op.cl); +		closure_call(&op->cl, bch_data_insert, NULL, cl);  	} -	continue_at(cl, write_moving_finish, NULL); +	continue_at(cl, write_moving_finish, op->wq);  }  static void read_moving_submit(struct closure *cl)  { -	struct search *s = container_of(cl, struct search, cl); -	struct moving_io *io = container_of(s, struct moving_io, s); +	struct moving_io *io = container_of(cl, struct moving_io, cl);  	struct bio *bio = &io->bio.bio; -	bch_submit_bbio(bio, s->op.c, &io->w->key, 0); +	bch_submit_bbio(bio, io->op.c, &io->w->key, 0); -	continue_at(cl, write_moving, bch_gc_wq); +	continue_at(cl, write_moving, io->op.wq);  } -static void read_moving(struct closure *cl) +static void read_moving(struct cache_set *c)  { -	struct cache_set *c = container_of(cl, struct cache_set, moving_gc);  	struct keybuf_key *w;  	struct moving_io *io;  	struct bio *bio; +	struct closure cl; + +	closure_init_stack(&cl);  	/* XXX: if we error, background writeback could stall indefinitely */ @@ -142,6 +143,11 @@ static void read_moving(struct closure *cl)  		if (!w)  			break; +		if (ptr_stale(c, &w->key, 0)) { +			bch_keybuf_del(&c->moving_gc_keys, w); +			continue; +		} +  		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)  			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),  			     GFP_KERNEL); @@ -150,8 +156,9 @@ static void read_moving(struct closure *cl)  		w->private	= io;  		io->w		= w; -		io->s.op.inode	= KEY_INODE(&w->key); -		io->s.op.c	= c; +		io->op.inode	= KEY_INODE(&w->key); +		io->op.c	= c; +		io->op.wq	= c->moving_gc_wq;  		moving_init(io);  		bio = &io->bio.bio; @@ -164,13 +171,8 @@ static void read_moving(struct closure *cl)  		trace_bcache_gc_copy(&w->key); -		closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl); - -		if (atomic_inc_return(&c->in_flight) >= 64) { -			closure_wait_event(&c->moving_gc_wait, cl, -					   atomic_read(&c->in_flight) < 64); -			continue_at(cl, read_moving, bch_gc_wq); -		} +		down(&c->moving_in_flight); +		closure_call(&io->cl, read_moving_submit, NULL, &cl);  	}  	if (0) { @@ -180,7 +182,7 @@ err:		if (!IS_ERR_OR_NULL(w->private))  		bch_keybuf_del(&c->moving_gc_keys, w);  	} -	closure_return(cl); +	closure_sync(&cl);  }  static bool bucket_cmp(struct bucket *l, struct bucket *r) @@ -190,30 +192,33 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)  static unsigned bucket_heap_top(struct cache *ca)  { -	return GC_SECTORS_USED(heap_peek(&ca->heap)); +	struct bucket *b; +	return (b = heap_peek(&ca->heap)) ? 
GC_SECTORS_USED(b) : 0;  } -void bch_moving_gc(struct closure *cl) +void bch_moving_gc(struct cache_set *c)  { -	struct cache_set *c = container_of(cl, struct cache_set, gc.cl);  	struct cache *ca;  	struct bucket *b;  	unsigned i;  	if (!c->copy_gc_enabled) -		closure_return(cl); +		return;  	mutex_lock(&c->bucket_lock);  	for_each_cache(ca, c, i) {  		unsigned sectors_to_move = 0;  		unsigned reserve_sectors = ca->sb.bucket_size * -			min(fifo_used(&ca->free), ca->free.size / 2); +			fifo_used(&ca->free[RESERVE_MOVINGGC]);  		ca->heap.used = 0;  		for_each_bucket(b, ca) { -			if (!GC_SECTORS_USED(b)) +			if (GC_MARK(b) == GC_MARK_METADATA || +			    !GC_SECTORS_USED(b) || +			    GC_SECTORS_USED(b) == ca->sb.bucket_size || +			    atomic_read(&b->pin))  				continue;  			if (!heap_full(&ca->heap)) { @@ -233,22 +238,19 @@ void bch_moving_gc(struct closure *cl)  			sectors_to_move -= GC_SECTORS_USED(b);  		} -		ca->gc_move_threshold = bucket_heap_top(ca); - -		pr_debug("threshold %u", ca->gc_move_threshold); +		while (heap_pop(&ca->heap, b, bucket_cmp)) +			SET_GC_MOVE(b, 1);  	}  	mutex_unlock(&c->bucket_lock);  	c->moving_gc_keys.last_scanned = ZERO_KEY; -	closure_init(&c->moving_gc, cl); -	read_moving(&c->moving_gc); - -	closure_return(cl); +	read_moving(c);  }  void bch_moving_init_cache_set(struct cache_set *c)  {  	bch_keybuf_init(&c->moving_gc_keys); +	sema_init(&c->moving_in_flight, 64);  } diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 786a1a4f74d..15fff4f68a7 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -12,11 +12,9 @@  #include "request.h"  #include "writeback.h" -#include <linux/cgroup.h>  #include <linux/module.h>  #include <linux/hash.h>  #include <linux/random.h> -#include "blk-cgroup.h"  #include <trace/events/bcache.h> @@ -25,187 +23,28 @@  struct kmem_cache *bch_search_cache; -static void check_should_skip(struct cached_dev *, struct search *); - -/* Cgroup interface */ - -#ifdef CONFIG_CGROUP_BCACHE -static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 }; - -static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup) -{ -	struct cgroup_subsys_state *css; -	return cgroup && -		(css = cgroup_subsys_state(cgroup, bcache_subsys_id)) -		? container_of(css, struct bch_cgroup, css) -		: &bcache_default_cgroup; -} - -struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio) -{ -	struct cgroup_subsys_state *css = bio->bi_css -		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id) -		: task_subsys_state(current, bcache_subsys_id); - -	return css -		? 
container_of(css, struct bch_cgroup, css) -		: &bcache_default_cgroup; -} - -static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft, -			struct file *file, -			char __user *buf, size_t nbytes, loff_t *ppos) -{ -	char tmp[1024]; -	int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes, -					  cgroup_to_bcache(cgrp)->cache_mode + 1); - -	if (len < 0) -		return len; - -	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); -} - -static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft, -			    const char *buf) -{ -	int v = bch_read_string_list(buf, bch_cache_modes); -	if (v < 0) -		return v; - -	cgroup_to_bcache(cgrp)->cache_mode = v - 1; -	return 0; -} - -static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft) -{ -	return cgroup_to_bcache(cgrp)->verify; -} - -static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val) -{ -	cgroup_to_bcache(cgrp)->verify = val; -	return 0; -} - -static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft) -{ -	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); -	return atomic_read(&bcachecg->stats.cache_hits); -} - -static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft) -{ -	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); -	return atomic_read(&bcachecg->stats.cache_misses); -} - -static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp, -					 struct cftype *cft) -{ -	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); -	return atomic_read(&bcachecg->stats.cache_bypass_hits); -} - -static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp, -					   struct cftype *cft) -{ -	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); -	return atomic_read(&bcachecg->stats.cache_bypass_misses); -} - -static struct cftype bch_files[] = { -	{ -		.name		= "cache_mode", -		.read		= cache_mode_read, -		.write_string	= cache_mode_write, -	}, -	{ -		.name		= "verify", -		.read_u64	= bch_verify_read, -		.write_u64	= bch_verify_write, -	}, -	{ -		.name		= "cache_hits", -		.read_u64	= bch_cache_hits_read, -	}, -	{ -		.name		= "cache_misses", -		.read_u64	= bch_cache_misses_read, -	}, -	{ -		.name		= "cache_bypass_hits", -		.read_u64	= bch_cache_bypass_hits_read, -	}, -	{ -		.name		= "cache_bypass_misses", -		.read_u64	= bch_cache_bypass_misses_read, -	}, -	{ }	/* terminate */ -}; - -static void init_bch_cgroup(struct bch_cgroup *cg) -{ -	cg->cache_mode = -1; -} - -static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup) -{ -	struct bch_cgroup *cg; - -	cg = kzalloc(sizeof(*cg), GFP_KERNEL); -	if (!cg) -		return ERR_PTR(-ENOMEM); -	init_bch_cgroup(cg); -	return &cg->css; -} - -static void bcachecg_destroy(struct cgroup *cgroup) -{ -	struct bch_cgroup *cg = cgroup_to_bcache(cgroup); -	free_css_id(&bcache_subsys, &cg->css); -	kfree(cg); -} - -struct cgroup_subsys bcache_subsys = { -	.create		= bcachecg_create, -	.destroy	= bcachecg_destroy, -	.subsys_id	= bcache_subsys_id, -	.name		= "bcache", -	.module		= THIS_MODULE, -}; -EXPORT_SYMBOL_GPL(bcache_subsys); -#endif +static void bch_data_insert_start(struct closure *);  static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)  { -#ifdef CONFIG_CGROUP_BCACHE -	int r = bch_bio_to_cgroup(bio)->cache_mode; -	if (r >= 0) -		return r; -#endif  	return BDEV_CACHE_MODE(&dc->sb);  }  static bool verify(struct cached_dev *dc, struct bio *bio)  { -#ifdef CONFIG_CGROUP_BCACHE -	if (bch_bio_to_cgroup(bio)->verify) -		return true; -#endif  	return dc->verify;  }  static void bio_csum(struct bio *bio, 
struct bkey *k)  { -	struct bio_vec *bv; +	struct bio_vec bv; +	struct bvec_iter iter;  	uint64_t csum = 0; -	int i; -	bio_for_each_segment(bv, bio, i) { -		void *d = kmap(bv->bv_page) + bv->bv_offset; -		csum = bch_crc64_update(csum, d, bv->bv_len); -		kunmap(bv->bv_page); +	bio_for_each_segment(bv, bio, iter) { +		void *d = kmap(bv.bv_page) + bv.bv_offset; +		csum = bch_crc64_update(csum, d, bv.bv_len); +		kunmap(bv.bv_page);  	}  	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1); @@ -213,221 +52,97 @@ static void bio_csum(struct bio *bio, struct bkey *k)  /* Insert data into cache */ -static void bio_invalidate(struct closure *cl) -{ -	struct btree_op *op = container_of(cl, struct btree_op, cl); -	struct bio *bio = op->cache_bio; - -	pr_debug("invalidating %i sectors from %llu", -		 bio_sectors(bio), (uint64_t) bio->bi_sector); - -	while (bio_sectors(bio)) { -		unsigned len = min(bio_sectors(bio), 1U << 14); - -		if (bch_keylist_realloc(&op->keys, 0, op->c)) -			goto out; - -		bio->bi_sector	+= len; -		bio->bi_size	-= len << 9; - -		bch_keylist_add(&op->keys, -				&KEY(op->inode, bio->bi_sector, len)); -	} - -	op->insert_data_done = true; -	bio_put(bio); -out: -	continue_at(cl, bch_journal, bcache_wq); -} - -struct open_bucket { -	struct list_head	list; -	struct task_struct	*last; -	unsigned		sectors_free; -	BKEY_PADDED(key); -}; - -void bch_open_buckets_free(struct cache_set *c) +static void bch_data_insert_keys(struct closure *cl)  { -	struct open_bucket *b; +	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); +	atomic_t *journal_ref = NULL; +	struct bkey *replace_key = op->replace ? &op->replace_key : NULL; +	int ret; -	while (!list_empty(&c->data_buckets)) { -		b = list_first_entry(&c->data_buckets, -				     struct open_bucket, list); -		list_del(&b->list); -		kfree(b); -	} -} - -int bch_open_buckets_alloc(struct cache_set *c) -{ -	int i; - -	spin_lock_init(&c->data_bucket_lock); +	/* +	 * If we're looping, might already be waiting on +	 * another journal write - can't wait on more than one journal write at +	 * a time +	 * +	 * XXX: this looks wrong +	 */ +#if 0 +	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING) +		closure_sync(&s->cl); +#endif -	for (i = 0; i < 6; i++) { -		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL); -		if (!b) -			return -ENOMEM; +	if (!op->replace) +		journal_ref = bch_journal(op->c, &op->insert_keys, +					  op->flush_journal ? cl : NULL); -		list_add(&b->list, &c->data_buckets); +	ret = bch_btree_insert(op->c, &op->insert_keys, +			       journal_ref, replace_key); +	if (ret == -ESRCH) { +		op->replace_collision = true; +	} else if (ret) { +		op->error		= -ENOMEM; +		op->insert_data_done	= true;  	} -	return 0; -} +	if (journal_ref) +		atomic_dec_bug(journal_ref); -/* - * We keep multiple buckets open for writes, and try to segregate different - * write streams for better cache utilization: first we look for a bucket where - * the last write to it was sequential with the current write, and failing that - * we look for a bucket that was last used by the same task. - * - * The ideas is if you've got multiple tasks pulling data into the cache at the - * same time, you'll get better cache utilization if you try to segregate their - * data and preserve locality. - * - * For example, say you've starting Firefox at the same time you're copying a - * bunch of files. 
Firefox will likely end up being fairly hot and stay in the - * cache awhile, but the data you copied might not be; if you wrote all that - * data to the same buckets it'd get invalidated at the same time. - * - * Both of those tasks will be doing fairly random IO so we can't rely on - * detecting sequential IO to segregate their data, but going off of the task - * should be a sane heuristic. - */ -static struct open_bucket *pick_data_bucket(struct cache_set *c, -					    const struct bkey *search, -					    struct task_struct *task, -					    struct bkey *alloc) -{ -	struct open_bucket *ret, *ret_task = NULL; +	if (!op->insert_data_done) +		continue_at(cl, bch_data_insert_start, op->wq); -	list_for_each_entry_reverse(ret, &c->data_buckets, list) -		if (!bkey_cmp(&ret->key, search)) -			goto found; -		else if (ret->last == task) -			ret_task = ret; - -	ret = ret_task ?: list_first_entry(&c->data_buckets, -					   struct open_bucket, list); -found: -	if (!ret->sectors_free && KEY_PTRS(alloc)) { -		ret->sectors_free = c->sb.bucket_size; -		bkey_copy(&ret->key, alloc); -		bkey_init(alloc); -	} - -	if (!ret->sectors_free) -		ret = NULL; - -	return ret; +	bch_keylist_free(&op->insert_keys); +	closure_return(cl);  } -/* - * Allocates some space in the cache to write to, and k to point to the newly - * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the - * end of the newly allocated space). - * - * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many - * sectors were actually allocated. - * - * If s->writeback is true, will not fail. - */ -static bool bch_alloc_sectors(struct bkey *k, unsigned sectors, -			      struct search *s) +static int bch_keylist_realloc(struct keylist *l, unsigned u64s, +			       struct cache_set *c)  { -	struct cache_set *c = s->op.c; -	struct open_bucket *b; -	BKEY_PADDED(key) alloc; -	struct closure cl, *w = NULL; -	unsigned i; - -	if (s->writeback) { -		closure_init_stack(&cl); -		w = &cl; -	} - -	/* -	 * We might have to allocate a new bucket, which we can't do with a -	 * spinlock held. So if we have to allocate, we drop the lock, allocate -	 * and then retry. KEY_PTRS() indicates whether alloc points to -	 * allocated bucket(s). -	 */ - -	bkey_init(&alloc.key); -	spin_lock(&c->data_bucket_lock); - -	while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) { -		unsigned watermark = s->op.write_prio -			? WATERMARK_MOVINGGC -			: WATERMARK_NONE; - -		spin_unlock(&c->data_bucket_lock); - -		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w)) -			return false; - -		spin_lock(&c->data_bucket_lock); -	} +	size_t oldsize = bch_keylist_nkeys(l); +	size_t newsize = oldsize + u64s;  	/* -	 * If we had to allocate, we might race and not need to allocate the -	 * second time we call find_data_bucket(). If we allocated a bucket but -	 * didn't use it, drop the refcount bch_bucket_alloc_set() took: +	 * The journalling code doesn't handle the case where the keys to insert +	 * is bigger than an empty write: If we just return -ENOMEM here, +	 * bio_insert() and bio_invalidate() will insert the keys created so far +	 * and finish the rest when the keylist is empty.  	 
*/ -	if (KEY_PTRS(&alloc.key)) -		__bkey_put(c, &alloc.key); - -	for (i = 0; i < KEY_PTRS(&b->key); i++) -		EBUG_ON(ptr_stale(c, &b->key, i)); - -	/* Set up the pointer to the space we're allocating: */ +	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset)) +		return -ENOMEM; -	for (i = 0; i < KEY_PTRS(&b->key); i++) -		k->ptr[i] = b->key.ptr[i]; +	return __bch_keylist_realloc(l, u64s); +} -	sectors = min(sectors, b->sectors_free); +static void bch_data_invalidate(struct closure *cl) +{ +	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); +	struct bio *bio = op->bio; -	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors); -	SET_KEY_SIZE(k, sectors); -	SET_KEY_PTRS(k, KEY_PTRS(&b->key)); +	pr_debug("invalidating %i sectors from %llu", +		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); -	/* -	 * Move b to the end of the lru, and keep track of what this bucket was -	 * last used for: -	 */ -	list_move_tail(&b->list, &c->data_buckets); -	bkey_copy_key(&b->key, k); -	b->last = s->task; +	while (bio_sectors(bio)) { +		unsigned sectors = min(bio_sectors(bio), +				       1U << (KEY_SIZE_BITS - 1)); -	b->sectors_free	-= sectors; +		if (bch_keylist_realloc(&op->insert_keys, 2, op->c)) +			goto out; -	for (i = 0; i < KEY_PTRS(&b->key); i++) { -		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors); +		bio->bi_iter.bi_sector	+= sectors; +		bio->bi_iter.bi_size	-= sectors << 9; -		atomic_long_add(sectors, -				&PTR_CACHE(c, &b->key, i)->sectors_written); +		bch_keylist_add(&op->insert_keys, +				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));  	} -	if (b->sectors_free < c->sb.block_size) -		b->sectors_free = 0; - -	/* -	 * k takes refcounts on the buckets it points to until it's inserted -	 * into the btree, but if we're done with this bucket we just transfer -	 * get_data_bucket()'s refcount. -	 */ -	if (b->sectors_free) -		for (i = 0; i < KEY_PTRS(&b->key); i++) -			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin); - -	spin_unlock(&c->data_bucket_lock); -	return true; +	op->insert_data_done = true; +	bio_put(bio); +out: +	continue_at(cl, bch_data_insert_keys, op->wq);  } -static void bch_insert_data_error(struct closure *cl) +static void bch_data_insert_error(struct closure *cl)  { -	struct btree_op *op = container_of(cl, struct btree_op, cl); +	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);  	/*  	 * Our data write just errored, which means we've got a bunch of keys to @@ -438,35 +153,34 @@ static void bch_insert_data_error(struct closure *cl)  	 * from the keys we'll accomplish just that.  	 */ -	struct bkey *src = op->keys.bottom, *dst = op->keys.bottom; +	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys; -	while (src != op->keys.top) { +	while (src != op->insert_keys.top) {  		struct bkey *n = bkey_next(src);  		SET_KEY_PTRS(src, 0); -		bkey_copy(dst, src); +		memmove(dst, src, bkey_bytes(src));  		dst = bkey_next(dst);  		src = n;  	} -	op->keys.top = dst; +	op->insert_keys.top = dst; -	bch_journal(cl); +	bch_data_insert_keys(cl);  } -static void bch_insert_data_endio(struct bio *bio, int error) +static void bch_data_insert_endio(struct bio *bio, int error)  {  	struct closure *cl = bio->bi_private; -	struct btree_op *op = container_of(cl, struct btree_op, cl); -	struct search *s = container_of(op, struct search, op); +	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);  	if (error) {  		/* TODO: We could try to recover from this. 
*/ -		if (s->writeback) -			s->error = error; -		else if (s->write) -			set_closure_fn(cl, bch_insert_data_error, bcache_wq); +		if (op->writeback) +			op->error = error; +		else if (!op->replace) +			set_closure_fn(cl, bch_data_insert_error, op->wq);  		else  			set_closure_fn(cl, NULL, NULL);  	} @@ -474,20 +188,19 @@ static void bch_insert_data_endio(struct bio *bio, int error)  	bch_bbio_endio(op->c, bio, error, "writing data to cache");  } -static void bch_insert_data_loop(struct closure *cl) +static void bch_data_insert_start(struct closure *cl)  { -	struct btree_op *op = container_of(cl, struct btree_op, cl); -	struct search *s = container_of(op, struct search, op); -	struct bio *bio = op->cache_bio, *n; - -	if (op->skip) -		return bio_invalidate(cl); +	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); +	struct bio *bio = op->bio, *n;  	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {  		set_gc_sectors(op->c); -		bch_queue_gc(op->c); +		wake_up_gc(op->c);  	} +	if (op->bypass) +		return bch_data_invalidate(cl); +  	/*  	 * Journal writes are marked REQ_FLUSH; if the original write was a  	 * flush, it'll wait on the journal write. @@ -497,29 +210,30 @@ static void bch_insert_data_loop(struct closure *cl)  	do {  		unsigned i;  		struct bkey *k; -		struct bio_set *split = s->d -			? s->d->bio_split : op->c->bio_split; +		struct bio_set *split = op->c->bio_split;  		/* 1 for the device pointer and 1 for the chksum */ -		if (bch_keylist_realloc(&op->keys, -					1 + (op->csum ? 1 : 0), +		if (bch_keylist_realloc(&op->insert_keys, +					3 + (op->csum ? 1 : 0),  					op->c)) -			continue_at(cl, bch_journal, bcache_wq); +			continue_at(cl, bch_data_insert_keys, op->wq); -		k = op->keys.top; +		k = op->insert_keys.top;  		bkey_init(k);  		SET_KEY_INODE(k, op->inode); -		SET_KEY_OFFSET(k, bio->bi_sector); +		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector); -		if (!bch_alloc_sectors(k, bio_sectors(bio), s)) +		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio), +				       op->write_point, op->write_prio, +				       op->writeback))  			goto err; -		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split); +		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split); -		n->bi_end_io	= bch_insert_data_endio; +		n->bi_end_io	= bch_data_insert_endio;  		n->bi_private	= cl; -		if (s->writeback) { +		if (op->writeback) {  			SET_KEY_DIRTY(k, true);  			for (i = 0; i < KEY_PTRS(k); i++) @@ -532,17 +246,17 @@ static void bch_insert_data_loop(struct closure *cl)  			bio_csum(n, k);  		trace_bcache_cache_insert(k); -		bch_keylist_push(&op->keys); +		bch_keylist_push(&op->insert_keys);  		n->bi_rw |= REQ_WRITE;  		bch_submit_bbio(n, op->c, k, 0);  	} while (n != bio);  	op->insert_data_done = true; -	continue_at(cl, bch_journal, bcache_wq); +	continue_at(cl, bch_data_insert_keys, op->wq);  err:  	/* bch_alloc_sectors() blocks if s->writeback = true */ -	BUG_ON(s->writeback); +	BUG_ON(op->writeback);  	/*  	 * But if it's not a writeback write we'd rather just bail out if @@ -550,15 +264,15 @@ err:  	 * we might be starving btree writes for gc or something.  	 */ -	if (s->write) { +	if (!op->replace) {  		/*  		 * Writethrough write: We can't complete the write until we've  		 * updated the index. But we don't want to delay the write while  		 * we wait for buckets to be freed up, so just invalidate the  		 * rest of the write.  		 
*/ -		op->skip = true; -		return bio_invalidate(cl); +		op->bypass = true; +		return bch_data_invalidate(cl);  	} else {  		/*  		 * From a cache miss, we can just insert the keys for the data @@ -567,15 +281,15 @@ err:  		op->insert_data_done = true;  		bio_put(bio); -		if (!bch_keylist_empty(&op->keys)) -			continue_at(cl, bch_journal, bcache_wq); +		if (!bch_keylist_empty(&op->insert_keys)) +			continue_at(cl, bch_data_insert_keys, op->wq);  		else  			closure_return(cl);  	}  }  /** - * bch_insert_data - stick some data in the cache + * bch_data_insert - stick some data in the cache   *   * This is the starting point for any data to end up in a cache device; it could   * be from a normal write, or a writeback write, or a write to a flash only @@ -587,56 +301,176 @@ err:   * data is written it calls bch_journal, and after the keys have been added to   * the next journal write they're inserted into the btree.   * - * It inserts the data in op->cache_bio; bi_sector is used for the key offset, + * It inserts the data in s->cache_bio; bi_sector is used for the key offset,   * and op->inode is used for the key inode.   * - * If op->skip is true, instead of inserting the data it invalidates the region - * of the cache represented by op->cache_bio and op->inode. + * If s->bypass is true, instead of inserting the data it invalidates the + * region of the cache represented by s->cache_bio and op->inode.   */ -void bch_insert_data(struct closure *cl) +void bch_data_insert(struct closure *cl)  { -	struct btree_op *op = container_of(cl, struct btree_op, cl); +	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); -	bch_keylist_init(&op->keys); -	bio_get(op->cache_bio); -	bch_insert_data_loop(cl); +	trace_bcache_write(op->bio, op->writeback, op->bypass); + +	bch_keylist_init(&op->insert_keys); +	bio_get(op->bio); +	bch_data_insert_start(cl);  } -void bch_btree_insert_async(struct closure *cl) +/* Congested? */ + +unsigned bch_get_congested(struct cache_set *c)  { -	struct btree_op *op = container_of(cl, struct btree_op, cl); -	struct search *s = container_of(op, struct search, op); +	int i; +	long rand; -	if (bch_btree_insert(op, op->c)) { -		s->error		= -ENOMEM; -		op->insert_data_done	= true; -	} +	if (!c->congested_read_threshold_us && +	    !c->congested_write_threshold_us) +		return 0; + +	i = (local_clock_us() - c->congested_last_us) / 1024; +	if (i < 0) +		return 0; + +	i += atomic_read(&c->congested); +	if (i >= 0) +		return 0; + +	i += CONGESTED_MAX; + +	if (i > 0) +		i = fract_exp_two(i, 6); -	if (op->insert_data_done) { -		bch_keylist_free(&op->keys); -		closure_return(cl); -	} else -		continue_at(cl, bch_insert_data_loop, bcache_wq); +	rand = get_random_int(); +	i -= bitmap_weight(&rand, BITS_PER_LONG); + +	return i > 0 ? 
i : 1;  } -/* Common code for the make_request functions */ +static void add_sequential(struct task_struct *t) +{ +	ewma_add(t->sequential_io_avg, +		 t->sequential_io, 8, 0); -static void request_endio(struct bio *bio, int error) +	t->sequential_io = 0; +} + +static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)  { -	struct closure *cl = bio->bi_private; +	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)]; +} -	if (error) { -		struct search *s = container_of(cl, struct search, cl); -		s->error = error; -		/* Only cache read errors are recoverable */ -		s->recoverable = false; +static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) +{ +	struct cache_set *c = dc->disk.c; +	unsigned mode = cache_mode(dc, bio); +	unsigned sectors, congested = bch_get_congested(c); +	struct task_struct *task = current; +	struct io *i; + +	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || +	    c->gc_stats.in_use > CUTOFF_CACHE_ADD || +	    (bio->bi_rw & REQ_DISCARD)) +		goto skip; + +	if (mode == CACHE_MODE_NONE || +	    (mode == CACHE_MODE_WRITEAROUND && +	     (bio->bi_rw & REQ_WRITE))) +		goto skip; + +	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || +	    bio_sectors(bio) & (c->sb.block_size - 1)) { +		pr_debug("skipping unaligned io"); +		goto skip;  	} -	bio_put(bio); -	closure_put(cl); +	if (bypass_torture_test(dc)) { +		if ((get_random_int() & 3) == 3) +			goto skip; +		else +			goto rescale; +	} + +	if (!congested && !dc->sequential_cutoff) +		goto rescale; + +	if (!congested && +	    mode == CACHE_MODE_WRITEBACK && +	    (bio->bi_rw & REQ_WRITE) && +	    (bio->bi_rw & REQ_SYNC)) +		goto rescale; + +	spin_lock(&dc->io_lock); + +	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) +		if (i->last == bio->bi_iter.bi_sector && +		    time_before(jiffies, i->jiffies)) +			goto found; + +	i = list_first_entry(&dc->io_lru, struct io, lru); + +	add_sequential(task); +	i->sequential = 0; +found: +	if (i->sequential + bio->bi_iter.bi_size > i->sequential) +		i->sequential	+= bio->bi_iter.bi_size; + +	i->last			 = bio_end_sector(bio); +	i->jiffies		 = jiffies + msecs_to_jiffies(5000); +	task->sequential_io	 = i->sequential; + +	hlist_del(&i->hash); +	hlist_add_head(&i->hash, iohash(dc, i->last)); +	list_move_tail(&i->lru, &dc->io_lru); + +	spin_unlock(&dc->io_lock); + +	sectors = max(task->sequential_io, +		      task->sequential_io_avg) >> 9; + +	if (dc->sequential_cutoff && +	    sectors >= dc->sequential_cutoff >> 9) { +		trace_bcache_bypass_sequential(bio); +		goto skip; +	} + +	if (congested && sectors >= congested) { +		trace_bcache_bypass_congested(bio); +		goto skip; +	} + +rescale: +	bch_rescale_priorities(c, bio_sectors(bio)); +	return false; +skip: +	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio)); +	return true;  } -void bch_cache_read_endio(struct bio *bio, int error) +/* Cache lookup */ + +struct search { +	/* Stack frame for bio_complete */ +	struct closure		cl; + +	struct bbio		bio; +	struct bio		*orig_bio; +	struct bio		*cache_miss; +	struct bcache_device	*d; + +	unsigned		insert_bio_sectors; +	unsigned		recoverable:1; +	unsigned		write:1; +	unsigned		read_dirty_data:1; + +	unsigned long		start_time; + +	struct btree_op		op; +	struct data_insert_op	iop; +}; + +static void bch_cache_read_endio(struct bio *bio, int error)  {  	struct bbio *b = container_of(bio, struct bbio, bio);  	struct closure *cl = bio->bi_private; @@ -650,13 +484,117 @@ void bch_cache_read_endio(struct bio *bio, int error)  	 */  	if (error) -		s->error = error; -	else 
if (ptr_stale(s->op.c, &b->key, 0)) { -		atomic_long_inc(&s->op.c->cache_read_races); -		s->error = -EINTR; +		s->iop.error = error; +	else if (!KEY_DIRTY(&b->key) && +		 ptr_stale(s->iop.c, &b->key, 0)) { +		atomic_long_inc(&s->iop.c->cache_read_races); +		s->iop.error = -EINTR; +	} + +	bch_bbio_endio(s->iop.c, bio, error, "reading from cache"); +} + +/* + * Read from a single key, handling the initial cache miss if the key starts in + * the middle of the bio + */ +static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) +{ +	struct search *s = container_of(op, struct search, op); +	struct bio *n, *bio = &s->bio.bio; +	struct bkey *bio_key; +	unsigned ptr; + +	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0) +		return MAP_CONTINUE; + +	if (KEY_INODE(k) != s->iop.inode || +	    KEY_START(k) > bio->bi_iter.bi_sector) { +		unsigned bio_sectors = bio_sectors(bio); +		unsigned sectors = KEY_INODE(k) == s->iop.inode +			? min_t(uint64_t, INT_MAX, +				KEY_START(k) - bio->bi_iter.bi_sector) +			: INT_MAX; + +		int ret = s->d->cache_miss(b, s, bio, sectors); +		if (ret != MAP_CONTINUE) +			return ret; + +		/* if this was a complete miss we shouldn't get here */ +		BUG_ON(bio_sectors <= sectors);  	} -	bch_bbio_endio(s->op.c, bio, error, "reading from cache"); +	if (!KEY_SIZE(k)) +		return MAP_CONTINUE; + +	/* XXX: figure out best pointer - for multiple cache devices */ +	ptr = 0; + +	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO; + +	if (KEY_DIRTY(k)) +		s->read_dirty_data = true; + +	n = bio_next_split(bio, min_t(uint64_t, INT_MAX, +				      KEY_OFFSET(k) - bio->bi_iter.bi_sector), +			   GFP_NOIO, s->d->bio_split); + +	bio_key = &container_of(n, struct bbio, bio)->key; +	bch_bkey_copy_single_ptr(bio_key, k, ptr); + +	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key); +	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key); + +	n->bi_end_io	= bch_cache_read_endio; +	n->bi_private	= &s->cl; + +	/* +	 * The bucket we're reading from might be reused while our bio +	 * is in flight, and we could then end up reading the wrong +	 * data. +	 * +	 * We guard against this by checking (in cache_read_endio()) if +	 * the pointer is stale again; if so, we treat it as an error +	 * and reread from the backing device (but we don't pass that +	 * error up anywhere). +	 */ + +	__bch_submit_bbio(n, b->c); +	return n == bio ? 
MAP_DONE : MAP_CONTINUE; +} + +static void cache_lookup(struct closure *cl) +{ +	struct search *s = container_of(cl, struct search, iop.cl); +	struct bio *bio = &s->bio.bio; +	int ret; + +	bch_btree_op_init(&s->op, -1); + +	ret = bch_btree_map_keys(&s->op, s->iop.c, +				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0), +				 cache_lookup_fn, MAP_END_KEY); +	if (ret == -EAGAIN) +		continue_at(cl, cache_lookup, bcache_wq); + +	closure_return(cl); +} + +/* Common code for the make_request functions */ + +static void request_endio(struct bio *bio, int error) +{ +	struct closure *cl = bio->bi_private; + +	if (error) { +		struct search *s = container_of(cl, struct search, cl); +		s->iop.error = error; +		/* Only cache read errors are recoverable */ +		s->recoverable = false; +	} + +	bio_put(bio); +	closure_put(cl);  }  static void bio_complete(struct search *s) @@ -670,19 +608,21 @@ static void bio_complete(struct search *s)  		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);  		part_stat_unlock(); -		trace_bcache_request_end(s, s->orig_bio); -		bio_endio(s->orig_bio, s->error); +		trace_bcache_request_end(s->d, s->orig_bio); +		bio_endio(s->orig_bio, s->iop.error);  		s->orig_bio = NULL;  	}  } -static void do_bio_hook(struct search *s) +static void do_bio_hook(struct search *s, struct bio *orig_bio)  {  	struct bio *bio = &s->bio.bio; -	memcpy(bio, s->orig_bio, sizeof(struct bio)); +	bio_init(bio); +	__bio_clone_fast(bio, orig_bio);  	bio->bi_end_io		= request_endio;  	bio->bi_private		= &s->cl; +  	atomic_set(&bio->bi_cnt, 3);  } @@ -691,61 +631,44 @@ static void search_free(struct closure *cl)  	struct search *s = container_of(cl, struct search, cl);  	bio_complete(s); -	if (s->op.cache_bio) -		bio_put(s->op.cache_bio); - -	if (s->unaligned_bvec) -		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec); +	if (s->iop.bio) +		bio_put(s->iop.bio);  	closure_debug_destroy(cl);  	mempool_free(s, s->d->c->search);  } -static struct search *search_alloc(struct bio *bio, struct bcache_device *d) +static inline struct search *search_alloc(struct bio *bio, +					  struct bcache_device *d)  { -	struct bio_vec *bv; -	struct search *s = mempool_alloc(d->c->search, GFP_NOIO); -	memset(s, 0, offsetof(struct search, op.keys)); +	struct search *s; -	__closure_init(&s->cl, NULL); +	s = mempool_alloc(d->c->search, GFP_NOIO); + +	closure_init(&s->cl, NULL); +	do_bio_hook(s, bio); -	s->op.inode		= d->id; -	s->op.c			= d->c; -	s->d			= d; -	s->op.lock		= -1; -	s->task			= current;  	s->orig_bio		= bio; -	s->write		= (bio->bi_rw & REQ_WRITE) != 0; -	s->op.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0; -	s->op.skip		= (bio->bi_rw & REQ_DISCARD) != 0; +	s->cache_miss		= NULL; +	s->d			= d;  	s->recoverable		= 1; +	s->write		= (bio->bi_rw & REQ_WRITE) != 0; +	s->read_dirty_data	= 0;  	s->start_time		= jiffies; -	do_bio_hook(s); -	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) { -		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO); -		memcpy(bv, bio_iovec(bio), -		       sizeof(struct bio_vec) * bio_segments(bio)); - -		s->bio.bio.bi_io_vec	= bv; -		s->unaligned_bvec	= 1; -	} +	s->iop.c		= d->c; +	s->iop.bio		= NULL; +	s->iop.inode		= d->id; +	s->iop.write_point	= hash_long((unsigned long) current, 16); +	s->iop.write_prio	= 0; +	s->iop.error		= 0; +	s->iop.flags		= 0; +	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0; +	s->iop.wq		= bcache_wq;  	return s;  } -static void btree_read_async(struct closure *cl) -{ -	struct btree_op *op = container_of(cl, struct btree_op, cl); - -	
int ret = btree_root(search_recurse, op->c, op); - -	if (ret == -EAGAIN) -		continue_at(cl, btree_read_async, bcache_wq); - -	closure_return(cl); -} -  /* Cached devices */  static void cached_dev_bio_complete(struct closure *cl) @@ -759,190 +682,179 @@ static void cached_dev_bio_complete(struct closure *cl)  /* Process reads */ -static void cached_dev_read_complete(struct closure *cl) +static void cached_dev_cache_miss_done(struct closure *cl)  {  	struct search *s = container_of(cl, struct search, cl); -	if (s->op.insert_collision) -		bch_mark_cache_miss_collision(s); +	if (s->iop.replace_collision) +		bch_mark_cache_miss_collision(s->iop.c, s->d); -	if (s->op.cache_bio) { +	if (s->iop.bio) {  		int i;  		struct bio_vec *bv; -		__bio_for_each_segment(bv, s->op.cache_bio, i, 0) +		bio_for_each_segment_all(bv, s->iop.bio, i)  			__free_page(bv->bv_page);  	}  	cached_dev_bio_complete(cl);  } -static void request_read_error(struct closure *cl) +static void cached_dev_read_error(struct closure *cl)  {  	struct search *s = container_of(cl, struct search, cl); -	struct bio_vec *bv; -	int i; +	struct bio *bio = &s->bio.bio;  	if (s->recoverable) {  		/* Retry from the backing device: */  		trace_bcache_read_retry(s->orig_bio); -		s->error = 0; -		bv = s->bio.bio.bi_io_vec; -		do_bio_hook(s); -		s->bio.bio.bi_io_vec = bv; - -		if (!s->unaligned_bvec) -			bio_for_each_segment(bv, s->orig_bio, i) -				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE; -		else -			memcpy(s->bio.bio.bi_io_vec, -			       bio_iovec(s->orig_bio), -			       sizeof(struct bio_vec) * -			       bio_segments(s->orig_bio)); +		s->iop.error = 0; +		do_bio_hook(s, s->orig_bio);  		/* XXX: invalidate cache */ -		closure_bio_submit(&s->bio.bio, &s->cl, s->d); +		closure_bio_submit(bio, cl, s->d);  	} -	continue_at(cl, cached_dev_read_complete, NULL); +	continue_at(cl, cached_dev_cache_miss_done, NULL);  } -static void request_read_done(struct closure *cl) +static void cached_dev_read_done(struct closure *cl)  {  	struct search *s = container_of(cl, struct search, cl);  	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);  	/* -	 * s->cache_bio != NULL implies that we had a cache miss; cache_bio now -	 * contains data ready to be inserted into the cache. +	 * We had a cache miss; cache_bio now contains data ready to be inserted +	 * into the cache.  	 
*  	 * First, we copy the data we just read from cache_bio's bounce buffers  	 * to the buffers the original bio pointed to:  	 */ -	if (s->op.cache_bio) { -		bio_reset(s->op.cache_bio); -		s->op.cache_bio->bi_sector	= s->cache_miss->bi_sector; -		s->op.cache_bio->bi_bdev	= s->cache_miss->bi_bdev; -		s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9; -		bch_bio_map(s->op.cache_bio, NULL); +	if (s->iop.bio) { +		bio_reset(s->iop.bio); +		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector; +		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev; +		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; +		bch_bio_map(s->iop.bio, NULL); -		bio_copy_data(s->cache_miss, s->op.cache_bio); +		bio_copy_data(s->cache_miss, s->iop.bio);  		bio_put(s->cache_miss);  		s->cache_miss = NULL;  	} -	if (verify(dc, &s->bio.bio) && s->recoverable) -		bch_data_verify(s); +	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data) +		bch_data_verify(dc, s->orig_bio);  	bio_complete(s); -	if (s->op.cache_bio && -	    !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) { -		s->op.type = BTREE_REPLACE; -		closure_call(&s->op.cl, bch_insert_data, NULL, cl); +	if (s->iop.bio && +	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) { +		BUG_ON(!s->iop.replace); +		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);  	} -	continue_at(cl, cached_dev_read_complete, NULL); +	continue_at(cl, cached_dev_cache_miss_done, NULL);  } -static void request_read_done_bh(struct closure *cl) +static void cached_dev_read_done_bh(struct closure *cl)  {  	struct search *s = container_of(cl, struct search, cl);  	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); -	bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip); -	trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip); +	bch_mark_cache_accounting(s->iop.c, s->d, +				  !s->cache_miss, s->iop.bypass); +	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass); -	if (s->error) -		continue_at_nobarrier(cl, request_read_error, bcache_wq); -	else if (s->op.cache_bio || verify(dc, &s->bio.bio)) -		continue_at_nobarrier(cl, request_read_done, bcache_wq); +	if (s->iop.error) +		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq); +	else if (s->iop.bio || verify(dc, &s->bio.bio)) +		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);  	else -		continue_at_nobarrier(cl, cached_dev_read_complete, NULL); +		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);  }  static int cached_dev_cache_miss(struct btree *b, struct search *s,  				 struct bio *bio, unsigned sectors)  { -	int ret = 0; -	unsigned reada; +	int ret = MAP_CONTINUE; +	unsigned reada = 0;  	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); -	struct bio *miss; - -	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); -	if (miss == bio) -		s->op.lookup_done = true; +	struct bio *miss, *cache_bio; -	miss->bi_end_io		= request_endio; -	miss->bi_private	= &s->cl; - -	if (s->cache_miss || s->op.skip) +	if (s->cache_miss || s->iop.bypass) { +		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split); +		ret = miss == bio ? 
MAP_DONE : MAP_CONTINUE;  		goto out_submit; - -	if (miss != bio || -	    (bio->bi_rw & REQ_RAHEAD) || -	    (bio->bi_rw & REQ_META) || -	    s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA) -		reada = 0; -	else { -		reada = min(dc->readahead >> 9, -			    sectors - bio_sectors(miss)); - -		if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev)) -			reada = bdev_sectors(miss->bi_bdev) - -				bio_end_sector(miss);  	} -	s->cache_bio_sectors = bio_sectors(miss) + reada; -	s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT, -			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS), -			dc->disk.bio_split); +	if (!(bio->bi_rw & REQ_RAHEAD) && +	    !(bio->bi_rw & REQ_META) && +	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA) +		reada = min_t(sector_t, dc->readahead >> 9, +			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio)); -	if (!s->op.cache_bio) -		goto out_submit; +	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); + +	s->iop.replace_key = KEY(s->iop.inode, +				 bio->bi_iter.bi_sector + s->insert_bio_sectors, +				 s->insert_bio_sectors); + +	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key); +	if (ret) +		return ret; -	s->op.cache_bio->bi_sector	= miss->bi_sector; -	s->op.cache_bio->bi_bdev	= miss->bi_bdev; -	s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9; +	s->iop.replace = true; -	s->op.cache_bio->bi_end_io	= request_endio; -	s->op.cache_bio->bi_private	= &s->cl; +	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);  	/* btree_search_recurse()'s btree iterator is no good anymore */ -	ret = -EINTR; -	if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio)) -		goto out_put; +	ret = miss == bio ? MAP_DONE : -EINTR; -	bch_bio_map(s->op.cache_bio, NULL); -	if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO)) +	cache_bio = bio_alloc_bioset(GFP_NOWAIT, +			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS), +			dc->disk.bio_split); +	if (!cache_bio) +		goto out_submit; + +	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector; +	cache_bio->bi_bdev		= miss->bi_bdev; +	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9; + +	cache_bio->bi_end_io	= request_endio; +	cache_bio->bi_private	= &s->cl; + +	bch_bio_map(cache_bio, NULL); +	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))  		goto out_put; -	s->cache_miss = miss; -	bio_get(s->op.cache_bio); +	if (reada) +		bch_mark_cache_readahead(s->iop.c, s->d); -	closure_bio_submit(s->op.cache_bio, &s->cl, s->d); +	s->cache_miss	= miss; +	s->iop.bio	= cache_bio; +	bio_get(cache_bio); +	closure_bio_submit(cache_bio, &s->cl, s->d);  	return ret;  out_put: -	bio_put(s->op.cache_bio); -	s->op.cache_bio = NULL; +	bio_put(cache_bio);  out_submit: +	miss->bi_end_io		= request_endio; +	miss->bi_private	= &s->cl;  	closure_bio_submit(miss, &s->cl, s->d);  	return ret;  } -static void request_read(struct cached_dev *dc, struct search *s) +static void cached_dev_read(struct cached_dev *dc, struct search *s)  {  	struct closure *cl = &s->cl; -	check_should_skip(dc, s); -	closure_call(&s->op.cl, btree_read_async, NULL, cl); - -	continue_at(cl, request_read_done_bh, NULL); +	closure_call(&s->iop.cl, cache_lookup, NULL, cl); +	continue_at(cl, cached_dev_read_done_bh, NULL);  }  /* Process writes */ @@ -956,88 +868,84 @@ static void cached_dev_write_complete(struct closure *cl)  	cached_dev_bio_complete(cl);  } -static void request_write(struct cached_dev *dc, struct search *s) +static void cached_dev_write(struct cached_dev *dc, struct search *s)  {  	struct closure *cl = &s->cl;  	struct bio 
*bio = &s->bio.bio; -	struct bkey start, end; -	start = KEY(dc->disk.id, bio->bi_sector, 0); -	end = KEY(dc->disk.id, bio_end_sector(bio), 0); +	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0); +	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0); -	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end); +	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end); -	check_should_skip(dc, s);  	down_read_non_owner(&dc->writeback_lock); -  	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) { -		s->op.skip	= false; -		s->writeback	= true; +		/* +		 * We overlap with some dirty data undergoing background +		 * writeback, force this write to writeback +		 */ +		s->iop.bypass = false; +		s->iop.writeback = true;  	} +	/* +	 * Discards aren't _required_ to do anything, so skipping if +	 * check_overlapping returned true is ok +	 * +	 * But check_overlapping drops dirty keys for which io hasn't started, +	 * so we still want to call it. +	 */  	if (bio->bi_rw & REQ_DISCARD) -		goto skip; +		s->iop.bypass = true;  	if (should_writeback(dc, s->orig_bio,  			     cache_mode(dc, bio), -			     s->op.skip)) { -		s->op.skip = false; -		s->writeback = true; +			     s->iop.bypass)) { +		s->iop.bypass = false; +		s->iop.writeback = true;  	} -	if (s->op.skip) -		goto skip; - -	trace_bcache_write(s->orig_bio, s->writeback, s->op.skip); +	if (s->iop.bypass) { +		s->iop.bio = s->orig_bio; +		bio_get(s->iop.bio); -	if (!s->writeback) { -		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, -						   dc->disk.bio_split); - -		closure_bio_submit(bio, cl, s->d); -	} else { +		if (!(bio->bi_rw & REQ_DISCARD) || +		    blk_queue_discard(bdev_get_queue(dc->bdev))) +			closure_bio_submit(bio, cl, s->d); +	} else if (s->iop.writeback) {  		bch_writeback_add(dc); +		s->iop.bio = bio; -		if (s->op.flush_journal) { +		if (bio->bi_rw & REQ_FLUSH) {  			/* Also need to send a flush to the backing device */ -			s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, -							   dc->disk.bio_split); +			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0, +							     dc->disk.bio_split); -			bio->bi_size = 0; -			bio->bi_vcnt = 0; -			closure_bio_submit(bio, cl, s->d); -		} else { -			s->op.cache_bio = bio; +			flush->bi_rw	= WRITE_FLUSH; +			flush->bi_bdev	= bio->bi_bdev; +			flush->bi_end_io = request_endio; +			flush->bi_private = cl; + +			closure_bio_submit(flush, cl, s->d);  		} -	} -out: -	closure_call(&s->op.cl, bch_insert_data, NULL, cl); -	continue_at(cl, cached_dev_write_complete, NULL); -skip: -	s->op.skip = true; -	s->op.cache_bio = s->orig_bio; -	bio_get(s->op.cache_bio); +	} else { +		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split); -	if ((bio->bi_rw & REQ_DISCARD) && -	    !blk_queue_discard(bdev_get_queue(dc->bdev))) -		goto out; +		closure_bio_submit(bio, cl, s->d); +	} -	closure_bio_submit(bio, cl, s->d); -	goto out; +	closure_call(&s->iop.cl, bch_data_insert, NULL, cl); +	continue_at(cl, cached_dev_write_complete, NULL);  } -static void request_nodata(struct cached_dev *dc, struct search *s) +static void cached_dev_nodata(struct closure *cl)  { -	struct closure *cl = &s->cl; +	struct search *s = container_of(cl, struct search, cl);  	struct bio *bio = &s->bio.bio; -	if (bio->bi_rw & REQ_DISCARD) { -		request_write(dc, s); -		return; -	} - -	if (s->op.flush_journal) -		bch_journal_meta(s->op.c, cl); +	if (s->iop.flush_journal) +		bch_journal_meta(s->iop.c, cl); +	/* If it's a flush, we send the flush to the backing device too */  	
closure_bio_submit(bio, cl, s->d);  	continue_at(cl, cached_dev_bio_complete, NULL); @@ -1045,134 +953,6 @@ static void request_nodata(struct cached_dev *dc, struct search *s)  /* Cached devices - read & write stuff */ -unsigned bch_get_congested(struct cache_set *c) -{ -	int i; -	long rand; - -	if (!c->congested_read_threshold_us && -	    !c->congested_write_threshold_us) -		return 0; - -	i = (local_clock_us() - c->congested_last_us) / 1024; -	if (i < 0) -		return 0; - -	i += atomic_read(&c->congested); -	if (i >= 0) -		return 0; - -	i += CONGESTED_MAX; - -	if (i > 0) -		i = fract_exp_two(i, 6); - -	rand = get_random_int(); -	i -= bitmap_weight(&rand, BITS_PER_LONG); - -	return i > 0 ? i : 1; -} - -static void add_sequential(struct task_struct *t) -{ -	ewma_add(t->sequential_io_avg, -		 t->sequential_io, 8, 0); - -	t->sequential_io = 0; -} - -static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k) -{ -	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)]; -} - -static void check_should_skip(struct cached_dev *dc, struct search *s) -{ -	struct cache_set *c = s->op.c; -	struct bio *bio = &s->bio.bio; -	unsigned mode = cache_mode(dc, bio); -	unsigned sectors, congested = bch_get_congested(c); - -	if (atomic_read(&dc->disk.detaching) || -	    c->gc_stats.in_use > CUTOFF_CACHE_ADD || -	    (bio->bi_rw & REQ_DISCARD)) -		goto skip; - -	if (mode == CACHE_MODE_NONE || -	    (mode == CACHE_MODE_WRITEAROUND && -	     (bio->bi_rw & REQ_WRITE))) -		goto skip; - -	if (bio->bi_sector   & (c->sb.block_size - 1) || -	    bio_sectors(bio) & (c->sb.block_size - 1)) { -		pr_debug("skipping unaligned io"); -		goto skip; -	} - -	if (!congested && !dc->sequential_cutoff) -		goto rescale; - -	if (!congested && -	    mode == CACHE_MODE_WRITEBACK && -	    (bio->bi_rw & REQ_WRITE) && -	    (bio->bi_rw & REQ_SYNC)) -		goto rescale; - -	if (dc->sequential_merge) { -		struct io *i; - -		spin_lock(&dc->io_lock); - -		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash) -			if (i->last == bio->bi_sector && -			    time_before(jiffies, i->jiffies)) -				goto found; - -		i = list_first_entry(&dc->io_lru, struct io, lru); - -		add_sequential(s->task); -		i->sequential = 0; -found: -		if (i->sequential + bio->bi_size > i->sequential) -			i->sequential	+= bio->bi_size; - -		i->last			 = bio_end_sector(bio); -		i->jiffies		 = jiffies + msecs_to_jiffies(5000); -		s->task->sequential_io	 = i->sequential; - -		hlist_del(&i->hash); -		hlist_add_head(&i->hash, iohash(dc, i->last)); -		list_move_tail(&i->lru, &dc->io_lru); - -		spin_unlock(&dc->io_lock); -	} else { -		s->task->sequential_io = bio->bi_size; - -		add_sequential(s->task); -	} - -	sectors = max(s->task->sequential_io, -		      s->task->sequential_io_avg) >> 9; - -	if (dc->sequential_cutoff && -	    sectors >= dc->sequential_cutoff >> 9) { -		trace_bcache_bypass_sequential(s->orig_bio); -		goto skip; -	} - -	if (congested && sectors >= congested) { -		trace_bcache_bypass_congested(s->orig_bio); -		goto skip; -	} - -rescale: -	bch_rescale_priorities(c, bio_sectors(bio)); -	return; -skip: -	bch_mark_sectors_bypassed(s, bio_sectors(bio)); -	s->op.skip = true; -} -  static void cached_dev_make_request(struct request_queue *q, struct bio *bio)  {  	struct search *s; @@ -1186,18 +966,28 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)  	part_stat_unlock();  	bio->bi_bdev = dc->bdev; -	bio->bi_sector += dc->sb.data_offset; +	bio->bi_iter.bi_sector += dc->sb.data_offset;  	if (cached_dev_get(dc)) {  		s = search_alloc(bio, d); -		
trace_bcache_request_start(s, bio); +		trace_bcache_request_start(s->d, bio); + +		if (!bio->bi_iter.bi_size) { +			/* +			 * can't call bch_journal_meta from under +			 * generic_make_request +			 */ +			continue_at_nobarrier(&s->cl, +					      cached_dev_nodata, +					      bcache_wq); +		} else { +			s->iop.bypass = check_should_bypass(dc, bio); -		if (!bio_has_data(bio)) -			request_nodata(dc, s); -		else if (rw) -			request_write(dc, s); -		else -			request_read(dc, s); +			if (rw) +				cached_dev_write(dc, s); +			else +				cached_dev_read(dc, s); +		}  	} else {  		if ((bio->bi_rw & REQ_DISCARD) &&  		    !blk_queue_discard(bdev_get_queue(dc->bdev))) @@ -1254,27 +1044,28 @@ void bch_cached_dev_request_init(struct cached_dev *dc)  static int flash_dev_cache_miss(struct btree *b, struct search *s,  				struct bio *bio, unsigned sectors)  { -	struct bio_vec *bv; -	int i; +	unsigned bytes = min(sectors, bio_sectors(bio)) << 9; -	/* Zero fill bio */ +	swap(bio->bi_iter.bi_size, bytes); +	zero_fill_bio(bio); +	swap(bio->bi_iter.bi_size, bytes); -	bio_for_each_segment(bv, bio, i) { -		unsigned j = min(bv->bv_len >> 9, sectors); +	bio_advance(bio, bytes); -		void *p = kmap(bv->bv_page); -		memset(p + bv->bv_offset, 0, j << 9); -		kunmap(bv->bv_page); +	if (!bio->bi_iter.bi_size) +		return MAP_DONE; -		sectors	-= j; -	} +	return MAP_CONTINUE; +} -	bio_advance(bio, min(sectors << 9, bio->bi_size)); +static void flash_dev_nodata(struct closure *cl) +{ +	struct search *s = container_of(cl, struct search, cl); -	if (!bio->bi_size) -		s->op.lookup_done = true; +	if (s->iop.flush_journal) +		bch_journal_meta(s->iop.c, cl); -	return 0; +	continue_at(cl, search_free, NULL);  }  static void flash_dev_make_request(struct request_queue *q, struct bio *bio) @@ -1293,23 +1084,28 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)  	cl = &s->cl;  	bio = &s->bio.bio; -	trace_bcache_request_start(s, bio); +	trace_bcache_request_start(s->d, bio); -	if (bio_has_data(bio) && !rw) { -		closure_call(&s->op.cl, btree_read_async, NULL, cl); -	} else if (bio_has_data(bio) || s->op.skip) { -		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, -					&KEY(d->id, bio->bi_sector, 0), +	if (!bio->bi_iter.bi_size) { +		/* +		 * can't call bch_journal_meta from under +		 * generic_make_request +		 */ +		continue_at_nobarrier(&s->cl, +				      flash_dev_nodata, +				      bcache_wq); +	} else if (rw) { +		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, +					&KEY(d->id, bio->bi_iter.bi_sector, 0),  					&KEY(d->id, bio_end_sector(bio), 0)); -		s->writeback	= true; -		s->op.cache_bio	= bio; +		s->iop.bypass		= (bio->bi_rw & REQ_DISCARD) != 0; +		s->iop.writeback	= true; +		s->iop.bio		= bio; -		closure_call(&s->op.cl, bch_insert_data, NULL, cl); +		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);  	} else { -		/* No data - probably a cache flush */ -		if (s->op.flush_journal) -			bch_journal_meta(s->op.c, cl); +		closure_call(&s->iop.cl, cache_lookup, NULL, cl);  	}  	continue_at(cl, search_free, NULL); @@ -1349,9 +1145,6 @@ void bch_flash_dev_request_init(struct bcache_device *d)  void bch_request_exit(void)  { -#ifdef CONFIG_CGROUP_BCACHE -	cgroup_unload_subsys(&bcache_subsys); -#endif  	if (bch_search_cache)  		kmem_cache_destroy(bch_search_cache);  } @@ -1362,11 +1155,5 @@ int __init bch_request_init(void)  	if (!bch_search_cache)  		return -ENOMEM; -#ifdef CONFIG_CGROUP_BCACHE -	cgroup_load_subsys(&bcache_subsys); -	init_bch_cgroup(&bcache_default_cgroup); - -	
cgroup_add_cftypes(&bcache_subsys, bch_files); -#endif  	return 0;  } diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index 57dc4784f4f..1ff36875c2b 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h @@ -1,62 +1,43 @@  #ifndef _BCACHE_REQUEST_H_  #define _BCACHE_REQUEST_H_ -#include <linux/cgroup.h> - -struct search { -	/* Stack frame for bio_complete */ +struct data_insert_op {  	struct closure		cl; +	struct cache_set	*c; +	struct bio		*bio; +	struct workqueue_struct *wq; -	struct bcache_device	*d; -	struct task_struct	*task; +	unsigned		inode; +	uint16_t		write_point; +	uint16_t		write_prio; +	short			error; -	struct bbio		bio; -	struct bio		*orig_bio; -	struct bio		*cache_miss; -	unsigned		cache_bio_sectors; +	union { +		uint16_t	flags; -	unsigned		recoverable:1; -	unsigned		unaligned_bvec:1; +	struct { +		unsigned	bypass:1; +		unsigned	writeback:1; +		unsigned	flush_journal:1; +		unsigned	csum:1; -	unsigned		write:1; -	unsigned		writeback:1; +		unsigned	replace:1; +		unsigned	replace_collision:1; -	/* IO error returned to s->bio */ -	short			error; -	unsigned long		start_time; +		unsigned	insert_data_done:1; +	}; +	}; -	/* Anything past op->keys won't get zeroed in do_bio_hook */ -	struct btree_op		op; +	struct keylist		insert_keys; +	BKEY_PADDED(replace_key);  }; -void bch_cache_read_endio(struct bio *, int);  unsigned bch_get_congested(struct cache_set *); -void bch_insert_data(struct closure *cl); -void bch_btree_insert_async(struct closure *); -void bch_cache_read_endio(struct bio *, int); - -void bch_open_buckets_free(struct cache_set *); -int bch_open_buckets_alloc(struct cache_set *); +void bch_data_insert(struct closure *cl);  void bch_cached_dev_request_init(struct cached_dev *dc);  void bch_flash_dev_request_init(struct bcache_device *d);  extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache; -struct bch_cgroup { -#ifdef CONFIG_CGROUP_BCACHE -	struct cgroup_subsys_state	css; -#endif -	/* -	 * We subtract one from the index into bch_cache_modes[], so that -	 * default == -1; this makes it so the rest match up with d->cache_mode, -	 * and we use d->cache_mode if cgrp->cache_mode < 0 -	 */ -	short				cache_mode; -	bool				verify; -	struct cache_stat_collector	stats; -}; - -struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio); -  #endif /* _BCACHE_REQUEST_H_ */ diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c index b8730e714d6..0ca072c20d0 100644 --- a/drivers/md/bcache/stats.c +++ b/drivers/md/bcache/stats.c @@ -7,7 +7,6 @@  #include "bcache.h"  #include "stats.h"  #include "btree.h" -#include "request.h"  #include "sysfs.h"  /* @@ -196,35 +195,33 @@ static void mark_cache_stats(struct cache_stat_collector *stats,  			atomic_inc(&stats->cache_bypass_misses);  } -void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass) +void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, +			       bool hit, bool bypass)  { -	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); +	struct cached_dev *dc = container_of(d, struct cached_dev, disk);  	mark_cache_stats(&dc->accounting.collector, hit, bypass); -	mark_cache_stats(&s->op.c->accounting.collector, hit, bypass); -#ifdef CONFIG_CGROUP_BCACHE -	mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass); -#endif +	mark_cache_stats(&c->accounting.collector, hit, bypass);  } -void bch_mark_cache_readahead(struct search *s) +void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)  
{ -	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); +	struct cached_dev *dc = container_of(d, struct cached_dev, disk);  	atomic_inc(&dc->accounting.collector.cache_readaheads); -	atomic_inc(&s->op.c->accounting.collector.cache_readaheads); +	atomic_inc(&c->accounting.collector.cache_readaheads);  } -void bch_mark_cache_miss_collision(struct search *s) +void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)  { -	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); +	struct cached_dev *dc = container_of(d, struct cached_dev, disk);  	atomic_inc(&dc->accounting.collector.cache_miss_collisions); -	atomic_inc(&s->op.c->accounting.collector.cache_miss_collisions); +	atomic_inc(&c->accounting.collector.cache_miss_collisions);  } -void bch_mark_sectors_bypassed(struct search *s, int sectors) +void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc, +			       int sectors)  { -	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);  	atomic_add(sectors, &dc->accounting.collector.sectors_bypassed); -	atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed); +	atomic_add(sectors, &c->accounting.collector.sectors_bypassed);  }  void bch_cache_accounting_init(struct cache_accounting *acc, diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h index c7c7a8fd29f..adbff141c88 100644 --- a/drivers/md/bcache/stats.h +++ b/drivers/md/bcache/stats.h @@ -38,7 +38,9 @@ struct cache_accounting {  	struct cache_stats day;  }; -struct search; +struct cache_set; +struct cached_dev; +struct bcache_device;  void bch_cache_accounting_init(struct cache_accounting *acc,  			       struct closure *parent); @@ -50,9 +52,10 @@ void bch_cache_accounting_clear(struct cache_accounting *acc);  void bch_cache_accounting_destroy(struct cache_accounting *acc); -void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass); -void bch_mark_cache_readahead(struct search *s); -void bch_mark_cache_miss_collision(struct search *s); -void bch_mark_sectors_bypassed(struct search *s, int sectors); +void bch_mark_cache_accounting(struct cache_set *, struct bcache_device *, +			       bool, bool); +void bch_mark_cache_readahead(struct cache_set *, struct bcache_device *); +void bch_mark_cache_miss_collision(struct cache_set *, struct bcache_device *); +void bch_mark_sectors_bypassed(struct cache_set *, struct cached_dev *, int);  #endif /* _BCACHE_STATS_H_ */ diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 547c4c57b05..926ded8ccbf 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -9,6 +9,7 @@  #include "bcache.h"  #include "btree.h"  #include "debug.h" +#include "extents.h"  #include "request.h"  #include "writeback.h" @@ -16,6 +17,7 @@  #include <linux/buffer_head.h>  #include <linux/debugfs.h>  #include <linux/genhd.h> +#include <linux/idr.h>  #include <linux/kthread.h>  #include <linux/module.h>  #include <linux/random.h> @@ -45,21 +47,13 @@ const char * const bch_cache_modes[] = {  	NULL  }; -struct uuid_entry_v0 { -	uint8_t		uuid[16]; -	uint8_t		label[32]; -	uint32_t	first_reg; -	uint32_t	last_reg; -	uint32_t	invalidated; -	uint32_t	pad; -}; -  static struct kobject *bcache_kobj;  struct mutex bch_register_lock;  LIST_HEAD(bch_cache_sets);  static LIST_HEAD(uncached_devices); -static int bcache_major, bcache_minor; +static int bcache_major; +static DEFINE_IDA(bcache_minor);  static wait_queue_head_t unregister_wait;  struct workqueue_struct *bcache_wq; @@ 
-232,7 +226,7 @@ static void write_bdev_super_endio(struct bio *bio, int error)  	struct cached_dev *dc = bio->bi_private;  	/* XXX: error checking */ -	closure_put(&dc->sb_write.cl); +	closure_put(&dc->sb_write);  }  static void __write_super(struct cache_sb *sb, struct bio *bio) @@ -240,9 +234,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)  	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);  	unsigned i; -	bio->bi_sector	= SB_SECTOR; -	bio->bi_rw	= REQ_SYNC|REQ_META; -	bio->bi_size	= SB_SIZE; +	bio->bi_iter.bi_sector	= SB_SECTOR; +	bio->bi_rw		= REQ_SYNC|REQ_META; +	bio->bi_iter.bi_size	= SB_SIZE;  	bch_bio_map(bio, NULL);  	out->offset		= cpu_to_le64(sb->offset); @@ -270,12 +264,20 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)  	submit_bio(REQ_WRITE, bio);  } +static void bch_write_bdev_super_unlock(struct closure *cl) +{ +	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write); + +	up(&dc->sb_write_mutex); +} +  void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)  { -	struct closure *cl = &dc->sb_write.cl; +	struct closure *cl = &dc->sb_write;  	struct bio *bio = &dc->sb_bio; -	closure_lock(&dc->sb_write, parent); +	down(&dc->sb_write_mutex); +	closure_init(cl, parent);  	bio_reset(bio);  	bio->bi_bdev	= dc->bdev; @@ -285,7 +287,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)  	closure_get(cl);  	__write_super(&dc->sb, bio); -	closure_return(cl); +	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);  }  static void write_super_endio(struct bio *bio, int error) @@ -293,16 +295,24 @@ static void write_super_endio(struct bio *bio, int error)  	struct cache *ca = bio->bi_private;  	bch_count_io_errors(ca, error, "writing superblock"); -	closure_put(&ca->set->sb_write.cl); +	closure_put(&ca->set->sb_write); +} + +static void bcache_write_super_unlock(struct closure *cl) +{ +	struct cache_set *c = container_of(cl, struct cache_set, sb_write); + +	up(&c->sb_write_mutex);  }  void bcache_write_super(struct cache_set *c)  { -	struct closure *cl = &c->sb_write.cl; +	struct closure *cl = &c->sb_write;  	struct cache *ca;  	unsigned i; -	closure_lock(&c->sb_write, &c->cl); +	down(&c->sb_write_mutex); +	closure_init(cl, &c->cl);  	c->sb.seq++; @@ -324,7 +334,7 @@ void bcache_write_super(struct cache_set *c)  		__write_super(&ca->sb, bio);  	} -	closure_return(cl); +	closure_return_with_destructor(cl, bcache_write_super_unlock);  }  /* UUID io */ @@ -332,29 +342,37 @@ void bcache_write_super(struct cache_set *c)  static void uuid_endio(struct bio *bio, int error)  {  	struct closure *cl = bio->bi_private; -	struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl); +	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);  	cache_set_err_on(error, c, "accessing uuids");  	bch_bbio_free(bio, c);  	closure_put(cl);  } +static void uuid_io_unlock(struct closure *cl) +{ +	struct cache_set *c = container_of(cl, struct cache_set, uuid_write); + +	up(&c->uuid_write_mutex); +} +  static void uuid_io(struct cache_set *c, unsigned long rw,  		    struct bkey *k, struct closure *parent)  { -	struct closure *cl = &c->uuid_write.cl; +	struct closure *cl = &c->uuid_write;  	struct uuid_entry *u;  	unsigned i;  	char buf[80];  	BUG_ON(!parent); -	closure_lock(&c->uuid_write, parent); +	down(&c->uuid_write_mutex); +	closure_init(cl, parent);  	for (i = 0; i < KEY_PTRS(k); i++) {  		struct bio *bio = bch_bbio_alloc(c);  		bio->bi_rw	= 
REQ_SYNC|REQ_META|rw; -		bio->bi_size	= KEY_SIZE(k) << 9; +		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;  		bio->bi_end_io	= uuid_endio;  		bio->bi_private = cl; @@ -366,7 +384,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,  			break;  	} -	bch_bkey_to_text(buf, sizeof(buf), k); +	bch_extent_to_text(buf, sizeof(buf), k);  	pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);  	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) @@ -375,14 +393,14 @@ static void uuid_io(struct cache_set *c, unsigned long rw,  				 u - c->uuids, u->uuid, u->label,  				 u->first_reg, u->last_reg, u->invalidated); -	closure_return(cl); +	closure_return_with_destructor(cl, uuid_io_unlock);  }  static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)  {  	struct bkey *k = &j->uuid_bucket; -	if (__bch_ptr_invalid(c, 1, k)) +	if (__bch_btree_ptr_invalid(c, k))  		return "bad uuid pointer";  	bkey_copy(&c->uuid_bucket, k); @@ -427,7 +445,7 @@ static int __uuid_write(struct cache_set *c)  	lockdep_assert_held(&bch_register_lock); -	if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl)) +	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))  		return 1;  	SET_KEY_SIZE(&k.key, c->sb.bucket_size); @@ -435,7 +453,7 @@ static int __uuid_write(struct cache_set *c)  	closure_sync(&cl);  	bkey_copy(&c->uuid_bucket, &k.key); -	__bkey_put(c, &k.key); +	bkey_put(c, &k.key);  	return 0;  } @@ -510,10 +528,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)  	closure_init_stack(cl); -	bio->bi_sector	= bucket * ca->sb.bucket_size; -	bio->bi_bdev	= ca->bdev; -	bio->bi_rw	= REQ_SYNC|REQ_META|rw; -	bio->bi_size	= bucket_bytes(ca); +	bio->bi_iter.bi_sector	= bucket * ca->sb.bucket_size; +	bio->bi_bdev		= ca->bdev; +	bio->bi_rw		= REQ_SYNC|REQ_META|rw; +	bio->bi_iter.bi_size	= bucket_bytes(ca);  	bio->bi_end_io	= prio_endio;  	bio->bi_private = ca; @@ -523,9 +541,6 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)  	closure_sync(cl);  } -#define buckets_free(c)	"free %zu, free_inc %zu, unused %zu",		\ -	fifo_used(&c->free), fifo_used(&c->free_inc), fifo_used(&c->unused) -  void bch_prio_write(struct cache *ca)  {  	int i; @@ -536,17 +551,13 @@ void bch_prio_write(struct cache *ca)  	lockdep_assert_held(&ca->set->bucket_lock); -	for (b = ca->buckets; -	     b < ca->buckets + ca->sb.nbuckets; b++) -		b->disk_gen = b->gen; -  	ca->disk_buckets->seq++;  	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),  			&ca->meta_sectors_written); -	pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free), -		 fifo_used(&ca->free_inc), fifo_used(&ca->unused)); +	//pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free), +	//	 fifo_used(&ca->free_inc), fifo_used(&ca->unused));  	for (i = prio_buckets(ca) - 1; i >= 0; --i) {  		long bucket; @@ -562,10 +573,10 @@ void bch_prio_write(struct cache *ca)  		}  		p->next_bucket	= ca->prio_buckets[i + 1]; -		p->magic	= pset_magic(ca); +		p->magic	= pset_magic(&ca->sb);  		p->csum		= bch_crc64(&p->magic, bucket_bytes(ca) - 8); -		bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl); +		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);  		BUG_ON(bucket == -1);  		mutex_unlock(&ca->set->bucket_lock); @@ -583,14 +594,17 @@ void bch_prio_write(struct cache *ca)  	mutex_lock(&ca->set->bucket_lock); -	ca->need_save_prio = 0; -  	/*  	 * Don't want the old priorities to get garbage collected until after we  	 * finish writing the new ones, and they're journalled  	 */ -	
for (i = 0; i < prio_buckets(ca); i++) +	for (i = 0; i < prio_buckets(ca); i++) { +		if (ca->prio_last_buckets[i]) +			__bch_bucket_free(ca, +				&ca->buckets[ca->prio_last_buckets[i]]); +  		ca->prio_last_buckets[i] = ca->prio_buckets[i]; +	}  }  static void prio_read(struct cache *ca, uint64_t bucket) @@ -613,7 +627,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)  			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))  				pr_warn("bad csum reading priorities"); -			if (p->magic != pset_magic(ca)) +			if (p->magic != pset_magic(&ca->sb))  				pr_warn("bad magic reading priorities");  			bucket = p->next_bucket; @@ -621,7 +635,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)  		}  		b->prio = le16_to_cpu(d->prio); -		b->gen = b->disk_gen = b->last_gc = b->gc_gen = d->gen; +		b->gen = b->last_gc = d->gen;  	}  } @@ -630,7 +644,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)  static int open_dev(struct block_device *b, fmode_t mode)  {  	struct bcache_device *d = b->bd_disk->private_data; -	if (atomic_read(&d->closing)) +	if (test_bit(BCACHE_DEV_CLOSING, &d->flags))  		return -ENXIO;  	closure_get(&d->cl); @@ -659,20 +673,24 @@ static const struct block_device_operations bcache_ops = {  void bcache_device_stop(struct bcache_device *d)  { -	if (!atomic_xchg(&d->closing, 1)) +	if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))  		closure_queue(&d->cl);  }  static void bcache_device_unlink(struct bcache_device *d)  { -	unsigned i; -	struct cache *ca; +	lockdep_assert_held(&bch_register_lock); -	sysfs_remove_link(&d->c->kobj, d->name); -	sysfs_remove_link(&d->kobj, "cache"); +	if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { +		unsigned i; +		struct cache *ca; -	for_each_cache(ca, d->c, i) -		bd_unlink_disk_holder(ca->bdev, d->disk); +		sysfs_remove_link(&d->c->kobj, d->name); +		sysfs_remove_link(&d->kobj, "cache"); + +		for_each_cache(ca, d->c, i) +			bd_unlink_disk_holder(ca->bdev, d->disk); +	}  }  static void bcache_device_link(struct bcache_device *d, struct cache_set *c, @@ -696,19 +714,16 @@ static void bcache_device_detach(struct bcache_device *d)  {  	lockdep_assert_held(&bch_register_lock); -	if (atomic_read(&d->detaching)) { +	if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {  		struct uuid_entry *u = d->c->uuids + d->id;  		SET_UUID_FLASH_ONLY(u, 0);  		memcpy(u->uuid, invalid_uuid, 16);  		u->invalidated = cpu_to_le32(get_seconds());  		bch_uuid_write(d->c); - -		atomic_set(&d->detaching, 0);  	} -	if (!d->flush_done) -		bcache_device_unlink(d); +	bcache_device_unlink(d);  	d->c->devices[d->id] = NULL;  	closure_put(&d->c->caching); @@ -739,14 +754,18 @@ static void bcache_device_free(struct bcache_device *d)  		del_gendisk(d->disk);  	if (d->disk && d->disk->queue)  		blk_cleanup_queue(d->disk->queue); -	if (d->disk) +	if (d->disk) { +		ida_simple_remove(&bcache_minor, d->disk->first_minor);  		put_disk(d->disk); +	}  	bio_split_pool_free(&d->bio_split_hook); -	if (d->unaligned_bvec) -		mempool_destroy(d->unaligned_bvec);  	if (d->bio_split)  		bioset_free(d->bio_split); +	if (is_vmalloc_addr(d->full_dirty_stripes)) +		vfree(d->full_dirty_stripes); +	else +		kfree(d->full_dirty_stripes);  	if (is_vmalloc_addr(d->stripe_sectors_dirty))  		vfree(d->stripe_sectors_dirty);  	else @@ -760,15 +779,19 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,  {  	struct request_queue *q;  	size_t n; +	int minor; -	if (!d->stripe_size_bits) -		d->stripe_size_bits = 31; +	if (!d->stripe_size) +		
d->stripe_size = 1 << 31; -	d->nr_stripes = round_up(sectors, 1 << d->stripe_size_bits) >> -		d->stripe_size_bits; +	d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size); -	if (!d->nr_stripes || d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) +	if (!d->nr_stripes || +	    d->nr_stripes > INT_MAX || +	    d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) { +		pr_err("nr_stripes too large");  		return -ENOMEM; +	}  	n = d->nr_stripes * sizeof(atomic_t);  	d->stripe_sectors_dirty = n < PAGE_SIZE << 6 @@ -777,22 +800,36 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,  	if (!d->stripe_sectors_dirty)  		return -ENOMEM; +	n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long); +	d->full_dirty_stripes = n < PAGE_SIZE << 6 +		? kzalloc(n, GFP_KERNEL) +		: vzalloc(n); +	if (!d->full_dirty_stripes) +		return -ENOMEM; + +	minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL); +	if (minor < 0) +		return minor; +  	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || -	    !(d->unaligned_bvec = mempool_create_kmalloc_pool(1, -				sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||  	    bio_split_pool_init(&d->bio_split_hook) || -	    !(d->disk = alloc_disk(1)) || -	    !(q = blk_alloc_queue(GFP_KERNEL))) +	    !(d->disk = alloc_disk(1))) { +		ida_simple_remove(&bcache_minor, minor);  		return -ENOMEM; +	}  	set_capacity(d->disk, sectors); -	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor); +	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);  	d->disk->major		= bcache_major; -	d->disk->first_minor	= bcache_minor++; +	d->disk->first_minor	= minor;  	d->disk->fops		= &bcache_ops;  	d->disk->private_data	= d; +	q = blk_alloc_queue(GFP_KERNEL); +	if (!q) +		return -ENOMEM; +  	blk_queue_make_request(q, NULL);  	d->disk->queue			= q;  	q->queuedata			= d; @@ -802,6 +839,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,  	q->limits.max_segment_size	= UINT_MAX;  	q->limits.max_segments		= BIO_MAX_PAGES;  	q->limits.max_discard_sectors	= UINT_MAX; +	q->limits.discard_granularity	= 512;  	q->limits.io_min		= block_size;  	q->limits.logical_block_size	= block_size;  	q->limits.physical_block_size	= block_size; @@ -874,7 +912,7 @@ static void cached_dev_detach_finish(struct work_struct *w)  	struct closure cl;  	closure_init_stack(&cl); -	BUG_ON(!atomic_read(&dc->disk.detaching)); +	BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));  	BUG_ON(atomic_read(&dc->count));  	mutex_lock(&bch_register_lock); @@ -888,6 +926,8 @@ static void cached_dev_detach_finish(struct work_struct *w)  	bcache_device_detach(&dc->disk);  	list_move(&dc->list, &uncached_devices); +	clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); +  	mutex_unlock(&bch_register_lock);  	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf)); @@ -900,10 +940,10 @@ void bch_cached_dev_detach(struct cached_dev *dc)  {  	lockdep_assert_held(&bch_register_lock); -	if (atomic_read(&dc->disk.closing)) +	if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))  		return; -	if (atomic_xchg(&dc->disk.detaching, 1)) +	if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))  		return;  	/* @@ -1030,6 +1070,7 @@ static void cached_dev_free(struct closure *cl)  	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);  	cancel_delayed_work_sync(&dc->writeback_rate_update); +	kthread_stop(dc->writeback_thread);  	mutex_lock(&bch_register_lock); @@ -1058,11 +1099,7 @@ static void cached_dev_flush(struct closure *cl)  	struct 
bcache_device *d = &dc->disk;  	mutex_lock(&bch_register_lock); -	d->flush_done = 1; - -	if (d->c) -		bcache_device_unlink(d); - +	bcache_device_unlink(d);  	mutex_unlock(&bch_register_lock);  	bch_cache_accounting_destroy(&dc->accounting); @@ -1083,12 +1120,11 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)  	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);  	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);  	INIT_WORK(&dc->detach, cached_dev_detach_finish); -	closure_init_unlocked(&dc->sb_write); +	sema_init(&dc->sb_write_mutex, 1);  	INIT_LIST_HEAD(&dc->io_lru);  	spin_lock_init(&dc->io_lock);  	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); -	dc->sequential_merge		= true;  	dc->sequential_cutoff		= 4 << 20;  	for (io = dc->io; io < dc->io + RECENT_IO; io++) { @@ -1096,6 +1132,12 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)  		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);  	} +	dc->disk.stripe_size = q->limits.io_opt >> 9; + +	if (dc->disk.stripe_size) +		dc->partial_stripes_expensive = +			q->limits.raid_partial_stripes_expensive; +  	ret = bcache_device_init(&dc->disk, block_size,  			 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);  	if (ret) @@ -1260,7 +1302,8 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)  {  	va_list args; -	if (test_bit(CACHE_SET_STOPPING, &c->flags)) +	if (c->on_error != ON_ERROR_PANIC && +	    test_bit(CACHE_SET_STOPPING, &c->flags))  		return false;  	/* XXX: we can be called from atomic context @@ -1275,6 +1318,9 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)  	printk(", disabling caching\n"); +	if (c->on_error == ON_ERROR_PANIC) +		panic("panic forced after error\n"); +  	bch_cache_set_unregister(c);  	return true;  } @@ -1303,9 +1349,11 @@ static void cache_set_free(struct closure *cl)  		if (ca)  			kobject_put(&ca->kobj); +	bch_bset_sort_state_free(&c->sort);  	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); -	free_pages((unsigned long) c->sort, ilog2(bucket_pages(c))); +	if (c->moving_gc_wq) +		destroy_workqueue(c->moving_gc_wq);  	if (c->bio_split)  		bioset_free(c->bio_split);  	if (c->fill_iter) @@ -1339,18 +1387,28 @@ static void cache_set_flush(struct closure *cl)  	kobject_put(&c->internal);  	kobject_del(&c->kobj); +	if (c->gc_thread) +		kthread_stop(c->gc_thread); +  	if (!IS_ERR_OR_NULL(c->root))  		list_add(&c->root->list, &c->btree_cache);  	/* Should skip this if we're unregistering because of an error */ -	list_for_each_entry(b, &c->btree_cache, list) +	list_for_each_entry(b, &c->btree_cache, list) { +		mutex_lock(&b->write_lock);  		if (btree_node_dirty(b)) -			bch_btree_node_write(b, NULL); +			__bch_btree_node_write(b, NULL); +		mutex_unlock(&b->write_lock); +	}  	for_each_cache(ca, c, i)  		if (ca->alloc_thread)  			kthread_stop(ca->alloc_thread); +	cancel_delayed_work_sync(&c->journal.work); +	/* flush last journal entry if needed */ +	c->journal.work.work.func(&c->journal.work.work); +  	closure_return(cl);  } @@ -1426,19 +1484,21 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)  	c->block_bits		= ilog2(sb->block_size);  	c->nr_uuids		= bucket_bytes(c) / sizeof(struct uuid_entry); -	c->btree_pages		= c->sb.bucket_size / PAGE_SECTORS; +	c->btree_pages		= bucket_pages(c);  	if (c->btree_pages > BTREE_MAX_PAGES)  		c->btree_pages = max_t(int, c->btree_pages / 4,  				       BTREE_MAX_PAGES); -	c->sort_crit_factor = int_sqrt(c->btree_pages); - +	sema_init(&c->sb_write_mutex, 1); 
 	mutex_init(&c->bucket_lock); -	mutex_init(&c->sort_lock); -	spin_lock_init(&c->sort_time_lock); -	closure_init_unlocked(&c->sb_write); -	closure_init_unlocked(&c->uuid_write); -	spin_lock_init(&c->btree_read_time_lock); +	init_waitqueue_head(&c->btree_cache_wait); +	init_waitqueue_head(&c->bucket_wait); +	sema_init(&c->uuid_write_mutex, 1); + +	spin_lock_init(&c->btree_gc_time.lock); +	spin_lock_init(&c->btree_split_time.lock); +	spin_lock_init(&c->btree_read_time.lock); +  	bch_moving_init_cache_set(c);  	INIT_LIST_HEAD(&c->list); @@ -1461,11 +1521,12 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)  				bucket_pages(c))) ||  	    !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||  	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || -	    !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||  	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || +	    !(c->moving_gc_wq = create_workqueue("bcache_gc")) ||  	    bch_journal_alloc(c) ||  	    bch_btree_cache_alloc(c) || -	    bch_open_buckets_alloc(c)) +	    bch_open_buckets_alloc(c) || +	    bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))  		goto err;  	c->congested_read_threshold_us	= 2000; @@ -1483,11 +1544,10 @@ static void run_cache_set(struct cache_set *c)  	const char *err = "cannot allocate memory";  	struct cached_dev *dc, *t;  	struct cache *ca; +	struct closure cl;  	unsigned i; -	struct btree_op op; -	bch_btree_op_init_stack(&op); -	op.lock = SHRT_MAX; +	closure_init_stack(&cl);  	for_each_cache(ca, c, i)  		c->nbuckets += ca->sb.nbuckets; @@ -1498,7 +1558,7 @@ static void run_cache_set(struct cache_set *c)  		struct jset *j;  		err = "cannot allocate memory for journal"; -		if (bch_journal_read(c, &journal, &op)) +		if (bch_journal_read(c, &journal))  			goto err;  		pr_debug("btree_journal_read() done"); @@ -1522,27 +1582,27 @@ static void run_cache_set(struct cache_set *c)  		k = &j->btree_root;  		err = "bad btree root"; -		if (__bch_ptr_invalid(c, j->btree_level + 1, k)) +		if (__bch_btree_ptr_invalid(c, k))  			goto err;  		err = "error reading btree root"; -		c->root = bch_btree_node_get(c, k, j->btree_level, &op); +		c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true);  		if (IS_ERR_OR_NULL(c->root))  			goto err;  		list_del_init(&c->root->list);  		rw_unlock(true, c->root); -		err = uuid_read(c, j, &op.cl); +		err = uuid_read(c, j, &cl);  		if (err)  			goto err;  		err = "error in recovery"; -		if (bch_btree_check(c, &op)) +		if (bch_btree_check(c))  			goto err;  		bch_journal_mark(c, &journal); -		bch_btree_gc_finish(c); +		bch_initial_gc_finish(c);  		pr_debug("btree_check() done");  		/* @@ -1570,11 +1630,9 @@ static void run_cache_set(struct cache_set *c)  		if (j->version < BCACHE_JSET_VERSION_UUID)  			__uuid_write(c); -		bch_journal_replay(c, &journal, &op); +		bch_journal_replay(c, &journal);  	} else {  		pr_notice("invalidating existing data"); -		/* Don't want invalidate_buckets() to queue a gc yet */ -		closure_lock(&c->gc, NULL);  		for_each_cache(ca, c, i) {  			unsigned j; @@ -1586,7 +1644,7 @@ static void run_cache_set(struct cache_set *c)  				ca->sb.d[j] = ca->sb.first_bucket + j;  		} -		bch_btree_gc_finish(c); +		bch_initial_gc_finish(c);  		err = "error starting allocator thread";  		for_each_cache(ca, c, i) @@ -1600,15 +1658,17 @@ static void run_cache_set(struct cache_set *c)  		err = "cannot allocate new UUID bucket";  		if (__uuid_write(c)) -			goto err_unlock_gc; +			goto err;  		err = "cannot allocate new btree root"; -		
c->root = bch_btree_node_alloc(c, 0, &op.cl); +		c->root = bch_btree_node_alloc(c, NULL, 0);  		if (IS_ERR_OR_NULL(c->root)) -			goto err_unlock_gc; +			goto err; +		mutex_lock(&c->root->write_lock);  		bkey_copy_key(&c->root->key, &MAX_KEY); -		bch_btree_node_write(c->root, &op.cl); +		bch_btree_node_write(c->root, &cl); +		mutex_unlock(&c->root->write_lock);  		bch_btree_set_root(c->root);  		rw_unlock(true, c->root); @@ -1621,14 +1681,14 @@ static void run_cache_set(struct cache_set *c)  		SET_CACHE_SYNC(&c->sb, true);  		bch_journal_next(&c->journal); -		bch_journal_meta(c, &op.cl); - -		/* Unlock */ -		closure_set_stopped(&c->gc.cl); -		closure_put(&c->gc.cl); +		bch_journal_meta(c, &cl);  	} -	closure_sync(&op.cl); +	err = "error starting gc thread"; +	if (bch_gc_thread_start(c)) +		goto err; + +	closure_sync(&cl);  	c->sb.last_mount = get_seconds();  	bcache_write_super(c); @@ -1638,19 +1698,16 @@ static void run_cache_set(struct cache_set *c)  	flash_devs_run(c);  	return; -err_unlock_gc: -	closure_set_stopped(&c->gc.cl); -	closure_put(&c->gc.cl);  err: -	closure_sync(&op.cl); +	closure_sync(&cl);  	/* XXX: test this, it's broken */ -	bch_cache_set_error(c, err); +	bch_cache_set_error(c, "%s", err);  }  static bool can_attach_cache(struct cache *ca, struct cache_set *c)  {  	return ca->sb.block_size	== c->sb.block_size && -		ca->sb.bucket_size	== c->sb.block_size && +		ca->sb.bucket_size	== c->sb.bucket_size &&  		ca->sb.nr_in_set	== c->sb.nr_in_set;  } @@ -1721,12 +1778,11 @@ err:  void bch_cache_release(struct kobject *kobj)  {  	struct cache *ca = container_of(kobj, struct cache, kobj); +	unsigned i;  	if (ca->set)  		ca->set->cache[ca->sb.nr_this_dev] = NULL; -	bch_cache_allocator_exit(ca); -  	bio_split_pool_free(&ca->bio_split_hook);  	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); @@ -1734,9 +1790,10 @@ void bch_cache_release(struct kobject *kobj)  	vfree(ca->buckets);  	free_heap(&ca->heap); -	free_fifo(&ca->unused);  	free_fifo(&ca->free_inc); -	free_fifo(&ca->free); + +	for (i = 0; i < RESERVE_NR; i++) +		free_fifo(&ca->free[i]);  	if (ca->sb_bio.bi_inline_vecs[0].bv_page)  		put_page(ca->sb_bio.bi_io_vec[0].bv_page); @@ -1758,18 +1815,17 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)  	__module_get(THIS_MODULE);  	kobject_init(&ca->kobj, &bch_cache_ktype); -	INIT_LIST_HEAD(&ca->discards); -  	bio_init(&ca->journal.bio);  	ca->journal.bio.bi_max_vecs = 8;  	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; -	free = roundup_pow_of_two(ca->sb.nbuckets) >> 9; -	free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2); +	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; -	if (!init_fifo(&ca->free,	free, GFP_KERNEL) || +	if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) || +	    !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || +	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || +	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||  	    !init_fifo(&ca->free_inc,	free << 2, GFP_KERNEL) || -	    !init_fifo(&ca->unused,	free << 2, GFP_KERNEL) ||  	    !init_heap(&ca->heap,	free << 3, GFP_KERNEL) ||  	    !(ca->buckets	= vzalloc(sizeof(struct bucket) *  					  ca->sb.nbuckets)) || @@ -1784,13 +1840,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)  	for_each_bucket(b, ca)  		atomic_set(&b->pin, 0); -	if (bch_cache_allocator_init(ca)) -		goto err; -  	return 0; -err: -	kobject_put(&ca->kobj); -	return -ENOMEM;  }  static void register_cache(struct cache_sb *sb, 
struct page *sb_page, @@ -1819,7 +1869,10 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page,  	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))  		goto err; +	mutex_lock(&bch_register_lock);  	err = register_cache_set(ca); +	mutex_unlock(&bch_register_lock); +  	if (err)  		goto err; @@ -1881,8 +1934,6 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,  	if (!try_module_get(THIS_MODULE))  		return -EBUSY; -	mutex_lock(&bch_register_lock); -  	if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||  	    !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))  		goto err; @@ -1915,7 +1966,9 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,  		if (!dc)  			goto err_close; +		mutex_lock(&bch_register_lock);  		register_bdev(sb, sb_page, bdev, dc); +		mutex_unlock(&bch_register_lock);  	} else {  		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);  		if (!ca) @@ -1928,7 +1981,6 @@ out:  		put_page(sb_page);  	kfree(sb);  	kfree(path); -	mutex_unlock(&bch_register_lock);  	module_put(THIS_MODULE);  	return ret; @@ -2006,14 +2058,13 @@ static struct notifier_block reboot = {  static void bcache_exit(void)  {  	bch_debug_exit(); -	bch_writeback_exit();  	bch_request_exit(); -	bch_btree_exit();  	if (bcache_kobj)  		kobject_put(bcache_kobj);  	if (bcache_wq)  		destroy_workqueue(bcache_wq); -	unregister_blkdev(bcache_major, "bcache"); +	if (bcache_major) +		unregister_blkdev(bcache_major, "bcache");  	unregister_reboot_notifier(&reboot);  } @@ -2037,9 +2088,7 @@ static int __init bcache_init(void)  	if (!(bcache_wq = create_workqueue("bcache")) ||  	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||  	    sysfs_create_files(bcache_kobj, files) || -	    bch_btree_init() ||  	    bch_request_init() || -	    bch_writeback_init() ||  	    bch_debug_init(bcache_kobj))  		goto err; diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 4fe6ab2fbe2..b3ff57d61dd 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -21,6 +21,12 @@ static const char * const cache_replacement_policies[] = {  	NULL  }; +static const char * const error_actions[] = { +	"unregister", +	"panic", +	NULL +}; +  write_attribute(attach);  write_attribute(detach);  write_attribute(unregister); @@ -48,7 +54,6 @@ sysfs_time_stats_attribute(btree_gc,	sec, ms);  sysfs_time_stats_attribute(btree_split, sec, us);  sysfs_time_stats_attribute(btree_sort,	ms,  us);  sysfs_time_stats_attribute(btree_read,	ms,  us); -sysfs_time_stats_attribute(try_harder,	ms,  us);  read_attribute(btree_nodes);  read_attribute(btree_used_percent); @@ -66,7 +71,6 @@ rw_attribute(congested_read_threshold_us);  rw_attribute(congested_write_threshold_us);  rw_attribute(sequential_cutoff); -rw_attribute(sequential_merge);  rw_attribute(data_csum);  rw_attribute(cache_mode);  rw_attribute(writeback_metadata); @@ -78,7 +82,6 @@ rw_attribute(writeback_rate);  rw_attribute(writeback_rate_update_seconds);  rw_attribute(writeback_rate_d_term);  rw_attribute(writeback_rate_p_term_inverse); -rw_attribute(writeback_rate_d_smooth);  read_attribute(writeback_rate_debug);  read_attribute(stripe_size); @@ -90,12 +93,14 @@ rw_attribute(discard);  rw_attribute(running);  rw_attribute(label);  rw_attribute(readahead); +rw_attribute(errors);  rw_attribute(io_error_limit);  rw_attribute(io_error_halflife);  rw_attribute(verify); +rw_attribute(bypass_torture_test);  rw_attribute(key_merging_disabled);  
rw_attribute(gc_always_rewrite); -rw_attribute(freelist_percent); +rw_attribute(expensive_debug_checks);  rw_attribute(cache_replacement_policy);  rw_attribute(btree_shrinker_disabled);  rw_attribute(copy_gc_enabled); @@ -116,44 +121,54 @@ SHOW(__bch_cached_dev)  	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);  	var_printf(verify,		"%i"); +	var_printf(bypass_torture_test,	"%i");  	var_printf(writeback_metadata,	"%i");  	var_printf(writeback_running,	"%i");  	var_print(writeback_delay);  	var_print(writeback_percent); -	sysfs_print(writeback_rate,	dc->writeback_rate.rate); +	sysfs_hprint(writeback_rate,	dc->writeback_rate.rate << 9);  	var_print(writeback_rate_update_seconds);  	var_print(writeback_rate_d_term);  	var_print(writeback_rate_p_term_inverse); -	var_print(writeback_rate_d_smooth);  	if (attr == &sysfs_writeback_rate_debug) { +		char rate[20];  		char dirty[20]; -		char derivative[20];  		char target[20]; -		bch_hprint(dirty, -			   bcache_dev_sectors_dirty(&dc->disk) << 9); -		bch_hprint(derivative,	dc->writeback_rate_derivative << 9); +		char proportional[20]; +		char derivative[20]; +		char change[20]; +		s64 next_io; + +		bch_hprint(rate,	dc->writeback_rate.rate << 9); +		bch_hprint(dirty,	bcache_dev_sectors_dirty(&dc->disk) << 9);  		bch_hprint(target,	dc->writeback_rate_target << 9); +		bch_hprint(proportional,dc->writeback_rate_proportional << 9); +		bch_hprint(derivative,	dc->writeback_rate_derivative << 9); +		bch_hprint(change,	dc->writeback_rate_change << 9); + +		next_io = div64_s64(dc->writeback_rate.next - local_clock(), +				    NSEC_PER_MSEC);  		return sprintf(buf, -			       "rate:\t\t%u\n" -			       "change:\t\t%i\n" +			       "rate:\t\t%s/sec\n"  			       "dirty:\t\t%s\n" +			       "target:\t\t%s\n" +			       "proportional:\t%s\n"  			       "derivative:\t%s\n" -			       "target:\t\t%s\n", -			       dc->writeback_rate.rate, -			       dc->writeback_rate_change, -			       dirty, derivative, target); +			       "change:\t\t%s/sec\n" +			       "next io:\t%llims\n", +			       rate, dirty, target, proportional, +			       derivative, change, next_io);  	}  	sysfs_hprint(dirty_data,  		     bcache_dev_sectors_dirty(&dc->disk) << 9); -	sysfs_hprint(stripe_size,	(1 << dc->disk.stripe_size_bits) << 9); +	sysfs_hprint(stripe_size,	dc->disk.stripe_size << 9);  	var_printf(partial_stripes_expensive,	"%u"); -	var_printf(sequential_merge,	"%i");  	var_hprint(sequential_cutoff);  	var_hprint(readahead); @@ -181,25 +196,25 @@ STORE(__cached_dev)  	struct kobj_uevent_env *env;  #define d_strtoul(var)		sysfs_strtoul(var, dc->var) +#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)  #define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)  	sysfs_strtoul(data_csum,	dc->disk.data_csum);  	d_strtoul(verify); +	d_strtoul(bypass_torture_test);  	d_strtoul(writeback_metadata);  	d_strtoul(writeback_running);  	d_strtoul(writeback_delay); -	sysfs_strtoul_clamp(writeback_rate, -			    dc->writeback_rate.rate, 1, 1000000); +  	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40); -	d_strtoul(writeback_rate_update_seconds); +	sysfs_strtoul_clamp(writeback_rate, +			    dc->writeback_rate.rate, 1, INT_MAX); + +	d_strtoul_nonzero(writeback_rate_update_seconds);  	d_strtoul(writeback_rate_d_term); -	d_strtoul(writeback_rate_p_term_inverse); -	sysfs_strtoul_clamp(writeback_rate_p_term_inverse, -			    dc->writeback_rate_p_term_inverse, 1, INT_MAX); -	d_strtoul(writeback_rate_d_smooth); +	d_strtoul_nonzero(writeback_rate_p_term_inverse); -	
d_strtoul(sequential_merge);  	d_strtoi_h(sequential_cutoff);  	d_strtoi_h(readahead); @@ -223,8 +238,13 @@ STORE(__cached_dev)  	}  	if (attr == &sysfs_label) { -		/* note: endlines are preserved */ -		memcpy(dc->sb.label, buf, SB_LABEL_SIZE); +		if (size > SB_LABEL_SIZE) +			return -EINVAL; +		memcpy(dc->sb.label, buf, size); +		if (size < SB_LABEL_SIZE) +			dc->sb.label[size] = '\0'; +		if (size && dc->sb.label[size - 1] == '\n') +			dc->sb.label[size - 1] = '\0';  		bch_write_bdev_super(dc, NULL);  		if (dc->disk.c) {  			memcpy(dc->disk.c->uuids[dc->disk.id].label, @@ -300,13 +320,11 @@ static struct attribute *bch_cached_dev_files[] = {  	&sysfs_writeback_rate_update_seconds,  	&sysfs_writeback_rate_d_term,  	&sysfs_writeback_rate_p_term_inverse, -	&sysfs_writeback_rate_d_smooth,  	&sysfs_writeback_rate_debug,  	&sysfs_dirty_data,  	&sysfs_stripe_size,  	&sysfs_partial_stripes_expensive,  	&sysfs_sequential_cutoff, -	&sysfs_sequential_merge,  	&sysfs_clear_stats,  	&sysfs_running,  	&sysfs_state, @@ -314,6 +332,7 @@ static struct attribute *bch_cached_dev_files[] = {  	&sysfs_readahead,  #ifdef CONFIG_BCACHE_DEBUG  	&sysfs_verify, +	&sysfs_bypass_torture_test,  #endif  	NULL  }; @@ -361,7 +380,7 @@ STORE(__bch_flash_dev)  	}  	if (attr == &sysfs_unregister) { -		atomic_set(&d->detaching, 1); +		set_bit(BCACHE_DEV_DETACHING, &d->flags);  		bcache_device_stop(d);  	} @@ -380,81 +399,123 @@ static struct attribute *bch_flash_dev_files[] = {  };  KTYPE(bch_flash_dev); -SHOW(__bch_cache_set) +struct bset_stats_op { +	struct btree_op op; +	size_t nodes; +	struct bset_stats stats; +}; + +static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)  { -	unsigned root_usage(struct cache_set *c) -	{ -		unsigned bytes = 0; -		struct bkey *k; -		struct btree *b; -		struct btree_iter iter; +	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op); -		goto lock_root; +	op->nodes++; +	bch_btree_keys_stats(&b->keys, &op->stats); -		do { -			rw_unlock(false, b); -lock_root: -			b = c->root; -			rw_lock(false, b, b->level); -		} while (b != c->root); +	return MAP_CONTINUE; +} -		for_each_key_filter(b, k, &iter, bch_ptr_bad) -			bytes += bkey_bytes(k); +static int bch_bset_print_stats(struct cache_set *c, char *buf) +{ +	struct bset_stats_op op; +	int ret; + +	memset(&op, 0, sizeof(op)); +	bch_btree_op_init(&op.op, -1); + +	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats); +	if (ret < 0) +		return ret; + +	return snprintf(buf, PAGE_SIZE, +			"btree nodes:		%zu\n" +			"written sets:		%zu\n" +			"unwritten sets:		%zu\n" +			"written key bytes:	%zu\n" +			"unwritten key bytes:	%zu\n" +			"floats:			%zu\n" +			"failed:			%zu\n", +			op.nodes, +			op.stats.sets_written, op.stats.sets_unwritten, +			op.stats.bytes_written, op.stats.bytes_unwritten, +			op.stats.floats, op.stats.failed); +} + +static unsigned bch_root_usage(struct cache_set *c) +{ +	unsigned bytes = 0; +	struct bkey *k; +	struct btree *b; +	struct btree_iter iter; + +	goto lock_root; +	do {  		rw_unlock(false, b); +lock_root: +		b = c->root; +		rw_lock(false, b, b->level); +	} while (b != c->root); -		return (bytes * 100) / btree_bytes(c); -	} +	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) +		bytes += bkey_bytes(k); -	size_t cache_size(struct cache_set *c) -	{ -		size_t ret = 0; -		struct btree *b; +	rw_unlock(false, b); -		mutex_lock(&c->bucket_lock); -		list_for_each_entry(b, &c->btree_cache, list) -			ret += 1 << (b->page_order + PAGE_SHIFT); +	return (bytes * 100) / btree_bytes(c); 
+} -		mutex_unlock(&c->bucket_lock); -		return ret; -	} +static size_t bch_cache_size(struct cache_set *c) +{ +	size_t ret = 0; +	struct btree *b; -	unsigned cache_max_chain(struct cache_set *c) -	{ -		unsigned ret = 0; -		struct hlist_head *h; +	mutex_lock(&c->bucket_lock); +	list_for_each_entry(b, &c->btree_cache, list) +		ret += 1 << (b->keys.page_order + PAGE_SHIFT); -		mutex_lock(&c->bucket_lock); +	mutex_unlock(&c->bucket_lock); +	return ret; +} -		for (h = c->bucket_hash; -		     h < c->bucket_hash + (1 << BUCKET_HASH_BITS); -		     h++) { -			unsigned i = 0; -			struct hlist_node *p; +static unsigned bch_cache_max_chain(struct cache_set *c) +{ +	unsigned ret = 0; +	struct hlist_head *h; -			hlist_for_each(p, h) -				i++; +	mutex_lock(&c->bucket_lock); -			ret = max(ret, i); -		} +	for (h = c->bucket_hash; +	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS); +	     h++) { +		unsigned i = 0; +		struct hlist_node *p; -		mutex_unlock(&c->bucket_lock); -		return ret; -	} +		hlist_for_each(p, h) +			i++; -	unsigned btree_used(struct cache_set *c) -	{ -		return div64_u64(c->gc_stats.key_bytes * 100, -				 (c->gc_stats.nodes ?: 1) * btree_bytes(c)); +		ret = max(ret, i);  	} -	unsigned average_key_size(struct cache_set *c) -	{ -		return c->gc_stats.nkeys -			? div64_u64(c->gc_stats.data, c->gc_stats.nkeys) -			: 0; -	} +	mutex_unlock(&c->bucket_lock); +	return ret; +} + +static unsigned bch_btree_used(struct cache_set *c) +{ +	return div64_u64(c->gc_stats.key_bytes * 100, +			 (c->gc_stats.nodes ?: 1) * btree_bytes(c)); +} + +static unsigned bch_average_key_size(struct cache_set *c) +{ +	return c->gc_stats.nkeys +		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys) +		: 0; +} +SHOW(__bch_cache_set) +{  	struct cache_set *c = container_of(kobj, struct cache_set, kobj);  	sysfs_print(synchronous,		CACHE_SYNC(&c->sb)); @@ -462,22 +523,20 @@ lock_root:  	sysfs_hprint(bucket_size,		bucket_bytes(c));  	sysfs_hprint(block_size,		block_bytes(c));  	sysfs_print(tree_depth,			c->root->level); -	sysfs_print(root_usage_percent,		root_usage(c)); +	sysfs_print(root_usage_percent,		bch_root_usage(c)); -	sysfs_hprint(btree_cache_size,		cache_size(c)); -	sysfs_print(btree_cache_max_chain,	cache_max_chain(c)); +	sysfs_hprint(btree_cache_size,		bch_cache_size(c)); +	sysfs_print(btree_cache_max_chain,	bch_cache_max_chain(c));  	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);  	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);  	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us); -	sysfs_print_time_stats(&c->sort_time,		btree_sort, ms, us); +	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);  	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us); -	sysfs_print_time_stats(&c->try_harder_time,	try_harder, ms, us); -	sysfs_print(btree_used_percent,	btree_used(c)); +	sysfs_print(btree_used_percent,	bch_btree_used(c));  	sysfs_print(btree_nodes,	c->gc_stats.nodes); -	sysfs_hprint(dirty_data,	c->gc_stats.dirty); -	sysfs_hprint(average_key_size,	average_key_size(c)); +	sysfs_hprint(average_key_size,	bch_average_key_size(c));  	sysfs_print(cache_read_races,  		    atomic_long_read(&c->cache_read_races)); @@ -487,6 +546,10 @@ lock_root:  	sysfs_print(writeback_keys_failed,  		    atomic_long_read(&c->writeback_keys_failed)); +	if (attr == &sysfs_errors) +		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions, +					       c->on_error); +  	/* See count_io_errors for why 88 */  	sysfs_print(io_error_halflife,	c->error_decay * 88);  	
sysfs_print(io_error_limit,	c->error_limit >> IO_ERROR_SHIFT); @@ -501,6 +564,8 @@ lock_root:  	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));  	sysfs_printf(verify,			"%i", c->verify);  	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled); +	sysfs_printf(expensive_debug_checks, +		     "%i", c->expensive_debug_checks);  	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);  	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);  	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled); @@ -550,7 +615,7 @@ STORE(__bch_cache_set)  	}  	if (attr == &sysfs_trigger_gc) -		bch_queue_gc(c); +		wake_up_gc(c);  	if (attr == &sysfs_prune_cache) {  		struct shrink_control sc; @@ -564,6 +629,15 @@ STORE(__bch_cache_set)  	sysfs_strtoul(congested_write_threshold_us,  		      c->congested_write_threshold_us); +	if (attr == &sysfs_errors) { +		ssize_t v = bch_read_string_list(buf, error_actions); + +		if (v < 0) +			return v; + +		c->on_error = v; +	} +  	if (attr == &sysfs_io_error_limit)  		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT; @@ -574,6 +648,7 @@ STORE(__bch_cache_set)  	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);  	sysfs_strtoul(verify,			c->verify);  	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled); +	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);  	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);  	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);  	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled); @@ -613,8 +688,8 @@ static struct attribute *bch_cache_set_files[] = {  	&sysfs_cache_available_percent,  	&sysfs_average_key_size, -	&sysfs_dirty_data, +	&sysfs_errors,  	&sysfs_io_error_limit,  	&sysfs_io_error_halflife,  	&sysfs_congested, @@ -632,7 +707,6 @@ static struct attribute *bch_cache_set_internal_files[] = {  	sysfs_time_stats_attribute_list(btree_split, sec, us)  	sysfs_time_stats_attribute_list(btree_sort, ms, us)  	sysfs_time_stats_attribute_list(btree_read, ms, us) -	sysfs_time_stats_attribute_list(try_harder, ms, us)  	&sysfs_btree_nodes,  	&sysfs_btree_used_percent, @@ -648,6 +722,7 @@ static struct attribute *bch_cache_set_internal_files[] = {  #ifdef CONFIG_BCACHE_DEBUG  	&sysfs_verify,  	&sysfs_key_merging_disabled, +	&sysfs_expensive_debug_checks,  #endif  	&sysfs_gc_always_rewrite,  	&sysfs_btree_shrinker_disabled, @@ -674,9 +749,6 @@ SHOW(__bch_cache)  	sysfs_print(io_errors,  		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT); -	sysfs_print(freelist_percent, ca->free.size * 100 / -		    ((size_t) ca->sb.nbuckets)); -  	if (attr == &sysfs_cache_replacement_policy)  		return bch_snprint_string_list(buf, PAGE_SIZE,  					       cache_replacement_policies, @@ -686,7 +758,9 @@ SHOW(__bch_cache)  		int cmp(const void *l, const void *r)  		{	return *((uint16_t *) r) - *((uint16_t *) l); } -		size_t n = ca->sb.nbuckets, i, unused, btree; +		struct bucket *b; +		size_t n = ca->sb.nbuckets, i; +		size_t unused = 0, available = 0, dirty = 0, meta = 0;  		uint64_t sum = 0;  		/* Compute 31 quantiles */  		uint16_t q[31], *p, *cached; @@ -697,6 +771,17 @@ SHOW(__bch_cache)  			return -ENOMEM;  		mutex_lock(&ca->set->bucket_lock); +		for_each_bucket(b, ca) { +			if (!GC_SECTORS_USED(b)) +				unused++; +			if (GC_MARK(b) == GC_MARK_RECLAIMABLE) +				available++; +			if (GC_MARK(b) == GC_MARK_DIRTY) +				dirty++; +			if (GC_MARK(b) == GC_MARK_METADATA) +				meta++; +		} +  		for (i = ca->sb.first_bucket; i < n; i++)  			p[i] = ca->buckets[i].prio;  		
mutex_unlock(&ca->set->bucket_lock); @@ -711,10 +796,7 @@ SHOW(__bch_cache)  		while (cached < p + n &&  		       *cached == BTREE_PRIO) -			cached++; - -		btree = cached - p; -		n -= btree; +			cached++, n--;  		for (i = 0; i < n; i++)  			sum += INITIAL_PRIO - cached[i]; @@ -730,12 +812,16 @@ SHOW(__bch_cache)  		ret = scnprintf(buf, PAGE_SIZE,  				"Unused:		%zu%%\n" +				"Clean:		%zu%%\n" +				"Dirty:		%zu%%\n"  				"Metadata:	%zu%%\n"  				"Average:	%llu\n"  				"Sectors per Q:	%zu\n"  				"Quantiles:	[",  				unused * 100 / (size_t) ca->sb.nbuckets, -				btree * 100 / (size_t) ca->sb.nbuckets, sum, +				available * 100 / (size_t) ca->sb.nbuckets, +				dirty * 100 / (size_t) ca->sb.nbuckets, +				meta * 100 / (size_t) ca->sb.nbuckets, sum,  				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));  		for (i = 0; i < ARRAY_SIZE(q); i++) @@ -783,32 +869,6 @@ STORE(__bch_cache)  		}  	} -	if (attr == &sysfs_freelist_percent) { -		DECLARE_FIFO(long, free); -		long i; -		size_t p = strtoul_or_return(buf); - -		p = clamp_t(size_t, -			    ((size_t) ca->sb.nbuckets * p) / 100, -			    roundup_pow_of_two(ca->sb.nbuckets) >> 9, -			    ca->sb.nbuckets / 2); - -		if (!init_fifo_exact(&free, p, GFP_KERNEL)) -			return -ENOMEM; - -		mutex_lock(&ca->set->bucket_lock); - -		fifo_move(&free, &ca->free); -		fifo_swap(&free, &ca->free); - -		mutex_unlock(&ca->set->bucket_lock); - -		while (fifo_pop(&free, i)) -			atomic_dec(&ca->buckets[i].pin); - -		free_fifo(&free); -	} -  	if (attr == &sysfs_clear_stats) {  		atomic_long_set(&ca->sectors_written, 0);  		atomic_long_set(&ca->btree_sectors_written, 0); @@ -832,7 +892,6 @@ static struct attribute *bch_cache_files[] = {  	&sysfs_metadata_written,  	&sysfs_io_errors,  	&sysfs_clear_stats, -	&sysfs_freelist_percent,  	&sysfs_cache_replacement_policy,  	NULL  }; diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c index f7b6c197f90..b7820b0d262 100644 --- a/drivers/md/bcache/trace.c +++ b/drivers/md/bcache/trace.c @@ -1,6 +1,5 @@  #include "bcache.h"  #include "btree.h" -#include "request.h"  #include <linux/blktrace_api.h>  #include <linux/module.h> @@ -46,7 +45,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_split);  EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_compact);  EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_set_root); -EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_invalidate); +EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_invalidate);  EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_fail);  EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback); diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 98eb81159a2..db3ae4c2b22 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -168,10 +168,14 @@ int bch_parse_uuid(const char *s, char *uuid)  void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)  { -	uint64_t now		= local_clock(); -	uint64_t duration	= time_after64(now, start_time) +	uint64_t now, duration, last; + +	spin_lock(&stats->lock); + +	now		= local_clock(); +	duration	= time_after64(now, start_time)  		? now - start_time : 0; -	uint64_t last		= time_after64(now, stats->last) +	last		= time_after64(now, stats->last)  		? 
now - stats->last : 0;  	stats->max_duration = max(stats->max_duration, duration); @@ -188,13 +192,30 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)  	}  	stats->last = now ?: 1; + +	spin_unlock(&stats->lock);  } -unsigned bch_next_delay(struct ratelimit *d, uint64_t done) +/** + * bch_next_delay() - increment @d by the amount of work done, and return how + * long to delay until the next time to do some work. + * + * @d - the struct bch_ratelimit to update + * @done - the amount of work done, in arbitrary units + * + * Returns the amount of time to delay by, in jiffies + */ +uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)  {  	uint64_t now = local_clock(); -	d->next += div_u64(done, d->rate); +	d->next += div_u64(done * NSEC_PER_SEC, d->rate); + +	if (time_before64(now + NSEC_PER_SEC, d->next)) +		d->next = now + NSEC_PER_SEC; + +	if (time_after64(now - NSEC_PER_SEC * 2, d->next)) +		d->next = now - NSEC_PER_SEC * 2;  	return time_after64(d->next, now)  		? div_u64(d->next - now, NSEC_PER_SEC / HZ) @@ -203,10 +224,10 @@ unsigned bch_next_delay(struct ratelimit *d, uint64_t done)  void bch_bio_map(struct bio *bio, void *base)  { -	size_t size = bio->bi_size; +	size_t size = bio->bi_iter.bi_size;  	struct bio_vec *bv = bio->bi_io_vec; -	BUG_ON(!bio->bi_size); +	BUG_ON(!bio->bi_iter.bi_size);  	BUG_ON(bio->bi_vcnt);  	bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0; diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 1ae2a73ad85..ac7d0d1f70d 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -2,6 +2,7 @@  #ifndef _BCACHE_UTIL_H  #define _BCACHE_UTIL_H +#include <linux/blkdev.h>  #include <linux/errno.h>  #include <linux/kernel.h>  #include <linux/llist.h> @@ -15,28 +16,20 @@  struct closure; -#ifdef CONFIG_BCACHE_EDEBUG +#ifdef CONFIG_BCACHE_DEBUG +#define EBUG_ON(cond)			BUG_ON(cond)  #define atomic_dec_bug(v)	BUG_ON(atomic_dec_return(v) < 0)  #define atomic_inc_bug(v, i)	BUG_ON(atomic_inc_return(v) <= i) -#else /* EDEBUG */ +#else /* DEBUG */ +#define EBUG_ON(cond)			do { if (cond); } while (0)  #define atomic_dec_bug(v)	atomic_dec(v)  #define atomic_inc_bug(v, i)	atomic_inc(v)  #endif -#define BITMASK(name, type, field, offset, size)		\ -static inline uint64_t name(const type *k)			\ -{ return (k->field >> offset) & ~(((uint64_t) ~0) << size); }	\ -								\ -static inline void SET_##name(type *k, uint64_t v)		\ -{								\ -	k->field &= ~(~((uint64_t) ~0 << size) << offset);	\ -	k->field |= v << offset;				\ -} -  #define DECLARE_HEAP(type, name)					\  	struct {							\  		size_t size, used;					\ @@ -120,7 +113,7 @@ do {									\  	_r;								\  }) -#define heap_peek(h)	((h)->size ? (h)->data[0] : NULL) +#define heap_peek(h)	((h)->used ? 
(h)->data[0] : NULL)  #define heap_full(h)	((h)->used == (h)->size) @@ -388,6 +381,7 @@ ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[  ssize_t bch_read_string_list(const char *buf, const char * const list[]);  struct time_stats { +	spinlock_t	lock;  	/*  	 * all fields are in nanoseconds, averages are ewmas stored left shifted  	 * by 8 @@ -400,6 +394,11 @@ struct time_stats {  void bch_time_stats_update(struct time_stats *stats, uint64_t time); +static inline unsigned local_clock_us(void) +{ +	return local_clock() >> 10; +} +  #define NSEC_PER_ns			1L  #define NSEC_PER_us			NSEC_PER_USEC  #define NSEC_PER_ms			NSEC_PER_MSEC @@ -450,17 +449,23 @@ read_attribute(name ## _last_ ## frequency_units)  	(ewma) >> factor;						\  }) -struct ratelimit { +struct bch_ratelimit { +	/* Next time we want to do some work, in nanoseconds */  	uint64_t		next; + +	/* +	 * Rate at which we want to do work, in units per nanosecond +	 * The units here correspond to the units passed to bch_next_delay() +	 */  	unsigned		rate;  }; -static inline void ratelimit_reset(struct ratelimit *d) +static inline void bch_ratelimit_reset(struct bch_ratelimit *d)  {  	d->next = local_clock();  } -unsigned bch_next_delay(struct ratelimit *d, uint64_t done); +uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);  #define __DIV_SAFE(n, d, zero)						\  ({									\ diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 22cbff55162..f4300e4c011 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -11,18 +11,11 @@  #include "debug.h"  #include "writeback.h" +#include <linux/delay.h> +#include <linux/freezer.h> +#include <linux/kthread.h>  #include <trace/events/bcache.h> -static struct workqueue_struct *dirty_wq; - -static void read_dirty(struct closure *); - -struct dirty_io { -	struct closure		cl; -	struct cached_dev	*dc; -	struct bio		bio; -}; -  /* Rate limiting */  static void __update_writeback_rate(struct cached_dev *dc) @@ -37,44 +30,43 @@ static void __update_writeback_rate(struct cached_dev *dc)  	/* PD controller */ -	int change = 0; -	int64_t error;  	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);  	int64_t derivative = dirty - dc->disk.sectors_dirty_last; +	int64_t proportional = dirty - target; +	int64_t change;  	dc->disk.sectors_dirty_last = dirty; -	derivative *= dc->writeback_rate_d_term; -	derivative = clamp(derivative, -dirty, dirty); +	/* Scale to sectors per second */ -	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative, -			      dc->writeback_rate_d_smooth, 0); +	proportional *= dc->writeback_rate_update_seconds; +	proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse); -	/* Avoid divide by zero */ -	if (!target) -		goto out; +	derivative = div_s64(derivative, dc->writeback_rate_update_seconds); -	error = div64_s64((dirty + derivative - target) << 8, target); +	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative, +			      (dc->writeback_rate_d_term / +			       dc->writeback_rate_update_seconds) ?: 1, 0); -	change = div_s64((dc->writeback_rate.rate * error) >> 8, -			 dc->writeback_rate_p_term_inverse); +	derivative *= dc->writeback_rate_d_term; +	derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse); + +	change = proportional + derivative;  	/* Don't increase writeback rate if the device isn't keeping up */  	if (change > 0 &&  	    time_after64(local_clock(), -			 dc->writeback_rate.next + 10 * NSEC_PER_MSEC)) +			 
dc->writeback_rate.next + NSEC_PER_MSEC))  		change = 0;  	dc->writeback_rate.rate = -		clamp_t(int64_t, dc->writeback_rate.rate + change, +		clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,  			1, NSEC_PER_MSEC); -out: + +	dc->writeback_rate_proportional = proportional;  	dc->writeback_rate_derivative = derivative;  	dc->writeback_rate_change = change;  	dc->writeback_rate_target = target; - -	schedule_delayed_work(&dc->writeback_rate_update, -			      dc->writeback_rate_update_seconds * HZ);  }  static void update_writeback_rate(struct work_struct *work) @@ -90,48 +82,25 @@ static void update_writeback_rate(struct work_struct *work)  		__update_writeback_rate(dc);  	up_read(&dc->writeback_lock); + +	schedule_delayed_work(&dc->writeback_rate_update, +			      dc->writeback_rate_update_seconds * HZ);  }  static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)  { -	if (atomic_read(&dc->disk.detaching) || +	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||  	    !dc->writeback_percent)  		return 0; -	return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); +	return bch_next_delay(&dc->writeback_rate, sectors);  } -/* Background writeback */ - -static bool dirty_pred(struct keybuf *buf, struct bkey *k) -{ -	return KEY_DIRTY(k); -} - -static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k) -{ -	uint64_t stripe; -	unsigned nr_sectors = KEY_SIZE(k); -	struct cached_dev *dc = container_of(buf, struct cached_dev, -					     writeback_keys); -	unsigned stripe_size = 1 << dc->disk.stripe_size_bits; - -	if (!KEY_DIRTY(k)) -		return false; - -	stripe = KEY_START(k) >> dc->disk.stripe_size_bits; -	while (1) { -		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) != -		    stripe_size) -			return false; - -		if (nr_sectors <= stripe_size) -			return true; - -		nr_sectors -= stripe_size; -		stripe++; -	} -} +struct dirty_io { +	struct closure		cl; +	struct cached_dev	*dc; +	struct bio		bio; +};  static void dirty_init(struct keybuf_key *w)  { @@ -142,138 +111,13 @@ static void dirty_init(struct keybuf_key *w)  	if (!io->dc->writeback_percent)  		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); -	bio->bi_size		= KEY_SIZE(&w->key) << 9; +	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;  	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);  	bio->bi_private		= w;  	bio->bi_io_vec		= bio->bi_inline_vecs;  	bch_bio_map(bio, NULL);  } -static void refill_dirty(struct closure *cl) -{ -	struct cached_dev *dc = container_of(cl, struct cached_dev, -					     writeback.cl); -	struct keybuf *buf = &dc->writeback_keys; -	bool searched_from_start = false; -	struct bkey end = MAX_KEY; -	SET_KEY_INODE(&end, dc->disk.id); - -	if (!atomic_read(&dc->disk.detaching) && -	    !dc->writeback_running) -		closure_return(cl); - -	down_write(&dc->writeback_lock); - -	if (!atomic_read(&dc->has_dirty)) { -		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); -		bch_write_bdev_super(dc, NULL); - -		up_write(&dc->writeback_lock); -		closure_return(cl); -	} - -	if (bkey_cmp(&buf->last_scanned, &end) >= 0) { -		buf->last_scanned = KEY(dc->disk.id, 0, 0); -		searched_from_start = true; -	} - -	if (dc->partial_stripes_expensive) { -		uint64_t i; - -		for (i = 0; i < dc->disk.nr_stripes; i++) -			if (atomic_read(dc->disk.stripe_sectors_dirty + i) == -			    1 << dc->disk.stripe_size_bits) -				goto full_stripes; - -		goto normal_refill; -full_stripes: -		bch_refill_keybuf(dc->disk.c, buf, &end, -				  dirty_full_stripe_pred); -	} else { -normal_refill: -		
bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred); -	} - -	if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) { -		/* Searched the entire btree  - delay awhile */ - -		if (RB_EMPTY_ROOT(&buf->keys)) { -			atomic_set(&dc->has_dirty, 0); -			cached_dev_put(dc); -		} - -		if (!atomic_read(&dc->disk.detaching)) -			closure_delay(&dc->writeback, dc->writeback_delay * HZ); -	} - -	up_write(&dc->writeback_lock); - -	ratelimit_reset(&dc->writeback_rate); - -	/* Punt to workqueue only so we don't recurse and blow the stack */ -	continue_at(cl, read_dirty, dirty_wq); -} - -void bch_writeback_queue(struct cached_dev *dc) -{ -	if (closure_trylock(&dc->writeback.cl, &dc->disk.cl)) { -		if (!atomic_read(&dc->disk.detaching)) -			closure_delay(&dc->writeback, dc->writeback_delay * HZ); - -		continue_at(&dc->writeback.cl, refill_dirty, dirty_wq); -	} -} - -void bch_writeback_add(struct cached_dev *dc) -{ -	if (!atomic_read(&dc->has_dirty) && -	    !atomic_xchg(&dc->has_dirty, 1)) { -		atomic_inc(&dc->count); - -		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) { -			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY); -			/* XXX: should do this synchronously */ -			bch_write_bdev_super(dc, NULL); -		} - -		bch_writeback_queue(dc); - -		if (dc->writeback_percent) -			schedule_delayed_work(&dc->writeback_rate_update, -				      dc->writeback_rate_update_seconds * HZ); -	} -} - -void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode, -				  uint64_t offset, int nr_sectors) -{ -	struct bcache_device *d = c->devices[inode]; -	unsigned stripe_size, stripe_offset; -	uint64_t stripe; - -	if (!d) -		return; - -	stripe_size = 1 << d->stripe_size_bits; -	stripe = offset >> d->stripe_size_bits; -	stripe_offset = offset & (stripe_size - 1); - -	while (nr_sectors) { -		int s = min_t(unsigned, abs(nr_sectors), -			      stripe_size - stripe_offset); - -		if (nr_sectors < 0) -			s = -s; - -		atomic_add(s, d->stripe_sectors_dirty + stripe); -		nr_sectors -= s; -		stripe_offset = 0; -		stripe++; -	} -} - -/* Background writeback - IO loop */ -  static void dirty_io_destructor(struct closure *cl)  {  	struct dirty_io *io = container_of(cl, struct dirty_io, cl); @@ -293,34 +137,31 @@ static void write_dirty_finish(struct closure *cl)  	/* This is kind of a dumb way of signalling errors. */  	if (KEY_DIRTY(&w->key)) { +		int ret;  		unsigned i; -		struct btree_op op; -		bch_btree_op_init_stack(&op); +		struct keylist keys; -		op.type = BTREE_REPLACE; -		bkey_copy(&op.replace, &w->key); +		bch_keylist_init(&keys); -		SET_KEY_DIRTY(&w->key, false); -		bch_keylist_add(&op.keys, &w->key); +		bkey_copy(keys.top, &w->key); +		SET_KEY_DIRTY(keys.top, false); +		bch_keylist_push(&keys);  		for (i = 0; i < KEY_PTRS(&w->key); i++)  			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin); -		bch_btree_insert(&op, dc->disk.c); -		closure_sync(&op.cl); +		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key); -		if (op.insert_collision) +		if (ret)  			trace_bcache_writeback_collision(&w->key); -		atomic_long_inc(op.insert_collision +		atomic_long_inc(ret  				? 
&dc->disk.c->writeback_keys_failed  				: &dc->disk.c->writeback_keys_done);  	}  	bch_keybuf_del(&dc->writeback_keys, w); -	atomic_dec_bug(&dc->in_flight); - -	closure_wake_up(&dc->writeback_wait); +	up(&dc->in_flight);  	closure_return_with_destructor(cl, dirty_io_destructor);  } @@ -343,13 +184,13 @@ static void write_dirty(struct closure *cl)  	dirty_init(w);  	io->bio.bi_rw		= WRITE; -	io->bio.bi_sector	= KEY_START(&w->key); +	io->bio.bi_iter.bi_sector = KEY_START(&w->key);  	io->bio.bi_bdev		= io->dc->bdev;  	io->bio.bi_end_io	= dirty_endio;  	closure_bio_submit(&io->bio, cl, &io->dc->disk); -	continue_at(cl, write_dirty_finish, dirty_wq); +	continue_at(cl, write_dirty_finish, system_wq);  }  static void read_dirty_endio(struct bio *bio, int error) @@ -369,37 +210,36 @@ static void read_dirty_submit(struct closure *cl)  	closure_bio_submit(&io->bio, cl, &io->dc->disk); -	continue_at(cl, write_dirty, dirty_wq); +	continue_at(cl, write_dirty, system_wq);  } -static void read_dirty(struct closure *cl) +static void read_dirty(struct cached_dev *dc)  { -	struct cached_dev *dc = container_of(cl, struct cached_dev, -					     writeback.cl); -	unsigned delay = writeback_delay(dc, 0); +	unsigned delay = 0;  	struct keybuf_key *w;  	struct dirty_io *io; +	struct closure cl; + +	closure_init_stack(&cl);  	/*  	 * XXX: if we error, background writeback just spins. Should use some  	 * mempools.  	 */ -	while (1) { +	while (!kthread_should_stop()) { +		try_to_freeze(); +  		w = bch_keybuf_next(&dc->writeback_keys);  		if (!w)  			break;  		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0)); -		if (delay > 0 && -		    (KEY_START(&w->key) != dc->last_read || -		     jiffies_to_msecs(delay) > 50)) { -			w->private = NULL; - -			closure_delay(&dc->writeback, delay); -			continue_at(cl, read_dirty, dirty_wq); -		} +		if (KEY_START(&w->key) != dc->last_read || +		    jiffies_to_msecs(delay) > 50) +			while (!kthread_should_stop() && delay) +				delay = schedule_timeout_uninterruptible(delay);  		dc->last_read	= KEY_OFFSET(&w->key); @@ -413,7 +253,7 @@ static void read_dirty(struct closure *cl)  		io->dc		= dc;  		dirty_init(w); -		io->bio.bi_sector	= PTR_OFFSET(&w->key, 0); +		io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);  		io->bio.bi_bdev		= PTR_CACHE(dc->disk.c,  						    &w->key, 0)->bdev;  		io->bio.bi_rw		= READ; @@ -424,15 +264,10 @@ static void read_dirty(struct closure *cl)  		trace_bcache_writeback(&w->key); -		closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl); +		down(&dc->in_flight); +		closure_call(&io->cl, read_dirty_submit, NULL, &cl);  		delay = writeback_delay(dc, KEY_SIZE(&w->key)); - -		atomic_inc(&dc->in_flight); - -		if (!closure_wait_event(&dc->writeback_wait, cl, -					atomic_read(&dc->in_flight) < 64)) -			continue_at(cl, read_dirty, dirty_wq);  	}  	if (0) { @@ -442,51 +277,211 @@ err:  		bch_keybuf_del(&dc->writeback_keys, w);  	} -	refill_dirty(cl); +	/* +	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be +	 * freed) before refilling again +	 */ +	closure_sync(&cl);  } -/* Init */ +/* Scan for dirty data */ + +void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode, +				  uint64_t offset, int nr_sectors) +{ +	struct bcache_device *d = c->devices[inode]; +	unsigned stripe_offset, stripe, sectors_dirty; + +	if (!d) +		return; + +	stripe = offset_to_stripe(d, offset); +	stripe_offset = offset & (d->stripe_size - 1); + +	while (nr_sectors) { +		int s = min_t(unsigned, abs(nr_sectors), +			      d->stripe_size - stripe_offset); + +		if 
(nr_sectors < 0) +			s = -s; + +		if (stripe >= d->nr_stripes) +			return; + +		sectors_dirty = atomic_add_return(s, +					d->stripe_sectors_dirty + stripe); +		if (sectors_dirty == d->stripe_size) +			set_bit(stripe, d->full_dirty_stripes); +		else +			clear_bit(stripe, d->full_dirty_stripes); + +		nr_sectors -= s; +		stripe_offset = 0; +		stripe++; +	} +} + +static bool dirty_pred(struct keybuf *buf, struct bkey *k) +{ +	return KEY_DIRTY(k); +} + +static void refill_full_stripes(struct cached_dev *dc) +{ +	struct keybuf *buf = &dc->writeback_keys; +	unsigned start_stripe, stripe, next_stripe; +	bool wrapped = false; + +	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned)); + +	if (stripe >= dc->disk.nr_stripes) +		stripe = 0; + +	start_stripe = stripe; + +	while (1) { +		stripe = find_next_bit(dc->disk.full_dirty_stripes, +				       dc->disk.nr_stripes, stripe); + +		if (stripe == dc->disk.nr_stripes) +			goto next; + +		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes, +						 dc->disk.nr_stripes, stripe); + +		buf->last_scanned = KEY(dc->disk.id, +					stripe * dc->disk.stripe_size, 0); + +		bch_refill_keybuf(dc->disk.c, buf, +				  &KEY(dc->disk.id, +				       next_stripe * dc->disk.stripe_size, 0), +				  dirty_pred); + +		if (array_freelist_empty(&buf->freelist)) +			return; -static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op, -					struct cached_dev *dc) +		stripe = next_stripe; +next: +		if (wrapped && stripe > start_stripe) +			return; + +		if (stripe == dc->disk.nr_stripes) { +			stripe = 0; +			wrapped = true; +		} +	} +} + +static bool refill_dirty(struct cached_dev *dc)  { -	struct bkey *k; -	struct btree_iter iter; - -	bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0)); -	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) -		if (!b->level) { -			if (KEY_INODE(k) > dc->disk.id) -				break; - -			if (KEY_DIRTY(k)) -				bcache_dev_sectors_dirty_add(b->c, dc->disk.id, -							     KEY_START(k), -							     KEY_SIZE(k)); -		} else { -			btree(sectors_dirty_init, k, b, op, dc); -			if (KEY_INODE(k) > dc->disk.id) -				break; - -			cond_resched(); +	struct keybuf *buf = &dc->writeback_keys; +	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0); +	bool searched_from_start = false; + +	if (dc->partial_stripes_expensive) { +		refill_full_stripes(dc); +		if (array_freelist_empty(&buf->freelist)) +			return false; +	} + +	if (bkey_cmp(&buf->last_scanned, &end) >= 0) { +		buf->last_scanned = KEY(dc->disk.id, 0, 0); +		searched_from_start = true; +	} + +	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred); + +	return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start; +} + +static int bch_writeback_thread(void *arg) +{ +	struct cached_dev *dc = arg; +	bool searched_full_index; + +	while (!kthread_should_stop()) { +		down_write(&dc->writeback_lock); +		if (!atomic_read(&dc->has_dirty) || +		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && +		     !dc->writeback_running)) { +			up_write(&dc->writeback_lock); +			set_current_state(TASK_INTERRUPTIBLE); + +			if (kthread_should_stop()) +				return 0; + +			try_to_freeze(); +			schedule(); +			continue; +		} + +		searched_full_index = refill_dirty(dc); + +		if (searched_full_index && +		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) { +			atomic_set(&dc->has_dirty, 0); +			cached_dev_put(dc); +			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); +			bch_write_bdev_super(dc, NULL);  		} +		up_write(&dc->writeback_lock); + +		
bch_ratelimit_reset(&dc->writeback_rate); +		read_dirty(dc); + +		if (searched_full_index) { +			unsigned delay = dc->writeback_delay * HZ; + +			while (delay && +			       !kthread_should_stop() && +			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) +				delay = schedule_timeout_uninterruptible(delay); +		} +	} +  	return 0;  } +/* Init */ + +struct sectors_dirty_init { +	struct btree_op	op; +	unsigned	inode; +}; + +static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b, +				 struct bkey *k) +{ +	struct sectors_dirty_init *op = container_of(_op, +						struct sectors_dirty_init, op); +	if (KEY_INODE(k) > op->inode) +		return MAP_DONE; + +	if (KEY_DIRTY(k)) +		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k), +					     KEY_START(k), KEY_SIZE(k)); + +	return MAP_CONTINUE; +} +  void bch_sectors_dirty_init(struct cached_dev *dc)  { -	struct btree_op op; +	struct sectors_dirty_init op; + +	bch_btree_op_init(&op.op, -1); +	op.inode = dc->disk.id; -	bch_btree_op_init_stack(&op); -	btree_root(sectors_dirty_init, dc->disk.c, &op, dc); +	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0), +			   sectors_dirty_init_fn, 0); + +	dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);  } -void bch_cached_dev_writeback_init(struct cached_dev *dc) +int bch_cached_dev_writeback_init(struct cached_dev *dc)  { -	closure_init_unlocked(&dc->writeback); +	sema_init(&dc->in_flight, 64);  	init_rwsem(&dc->writeback_lock); -  	bch_keybuf_init(&dc->writeback_keys);  	dc->writeback_metadata		= true; @@ -495,27 +490,18 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)  	dc->writeback_delay		= 30;  	dc->writeback_rate.rate		= 1024; -	dc->writeback_rate_update_seconds = 30; -	dc->writeback_rate_d_term	= 16; -	dc->writeback_rate_p_term_inverse = 64; -	dc->writeback_rate_d_smooth	= 8; +	dc->writeback_rate_update_seconds = 5; +	dc->writeback_rate_d_term	= 30; +	dc->writeback_rate_p_term_inverse = 6000; + +	dc->writeback_thread = kthread_create(bch_writeback_thread, dc, +					      "bcache_writeback"); +	if (IS_ERR(dc->writeback_thread)) +		return PTR_ERR(dc->writeback_thread);  	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);  	schedule_delayed_work(&dc->writeback_rate_update,  			      dc->writeback_rate_update_seconds * HZ); -} - -void bch_writeback_exit(void) -{ -	if (dirty_wq) -		destroy_workqueue(dirty_wq); -} - -int __init bch_writeback_init(void) -{ -	dirty_wq = create_singlethread_workqueue("bcache_writeback"); -	if (!dirty_wq) -		return -ENOMEM;  	return 0;  } diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index c91f61bb95b..e2f8598937a 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -14,20 +14,27 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)  	return ret;  } -static inline bool bcache_dev_stripe_dirty(struct bcache_device *d, +static inline unsigned offset_to_stripe(struct bcache_device *d, +					uint64_t offset) +{ +	do_div(offset, d->stripe_size); +	return offset; +} + +static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,  					   uint64_t offset,  					   unsigned nr_sectors)  { -	uint64_t stripe = offset >> d->stripe_size_bits; +	unsigned stripe = offset_to_stripe(&dc->disk, offset);  	while (1) { -		if (atomic_read(d->stripe_sectors_dirty + stripe)) +		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))  			return true; -		if (nr_sectors <= 1 << d->stripe_size_bits) +		if (nr_sectors <= dc->disk.stripe_size)  			return 
false; -		nr_sectors -= 1 << d->stripe_size_bits; +		nr_sectors -= dc->disk.stripe_size;  		stripe++;  	}  } @@ -38,12 +45,12 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,  	unsigned in_use = dc->disk.c->gc_stats.in_use;  	if (cache_mode != CACHE_MODE_WRITEBACK || -	    atomic_read(&dc->disk.detaching) || +	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||  	    in_use > CUTOFF_WRITEBACK_SYNC)  		return false;  	if (dc->partial_stripes_expensive && -	    bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector, +	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,  				    bio_sectors(bio)))  		return true; @@ -54,11 +61,30 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,  		in_use <= CUTOFF_WRITEBACK;  } +static inline void bch_writeback_queue(struct cached_dev *dc) +{ +	wake_up_process(dc->writeback_thread); +} + +static inline void bch_writeback_add(struct cached_dev *dc) +{ +	if (!atomic_read(&dc->has_dirty) && +	    !atomic_xchg(&dc->has_dirty, 1)) { +		atomic_inc(&dc->count); + +		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) { +			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY); +			/* XXX: should do this synchronously */ +			bch_write_bdev_super(dc, NULL); +		} + +		bch_writeback_queue(dc); +	} +} +  void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int); -void bch_writeback_queue(struct cached_dev *); -void bch_writeback_add(struct cached_dev *);  void bch_sectors_dirty_init(struct cached_dev *dc); -void bch_cached_dev_writeback_init(struct cached_dev *); +int bch_cached_dev_writeback_init(struct cached_dev *);  #endif diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index a7fd82133b1..67f8b31e205 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -669,17 +669,13 @@ static inline unsigned long file_page_offset(struct bitmap_storage *store,  /*   * return a pointer to the page in the filemap that contains the given bit   * - * this lookup is complicated by the fact that the bitmap sb might be exactly - * 1 page (e.g., x86) or less than 1 page -- so the bitmap might start on page - * 0 or page 1   */  static inline struct page *filemap_get_page(struct bitmap_storage *store,  					    unsigned long chunk)  {  	if (file_page_index(store, chunk) >= store->file_pages)  		return NULL; -	return store->filemap[file_page_index(store, chunk) -			      - file_page_index(store, 0)]; +	return store->filemap[file_page_index(store, chunk)];  }  static int bitmap_storage_alloc(struct bitmap_storage *store, @@ -1635,7 +1631,7 @@ int bitmap_create(struct mddev *mddev)  	sector_t blocks = mddev->resync_max_sectors;  	struct file *file = mddev->bitmap_info.file;  	int err; -	struct sysfs_dirent *bm = NULL; +	struct kernfs_node *bm = NULL;  	BUILD_BUG_ON(sizeof(bitmap_super_t) != 256); @@ -1654,9 +1650,9 @@ int bitmap_create(struct mddev *mddev)  	bitmap->mddev = mddev;  	if (mddev->kobj.sd) -		bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap"); +		bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");  	if (bm) { -		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear"); +		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");  		sysfs_put(bm);  	} else  		bitmap->sysfs_can_clear = NULL; @@ -1988,7 +1984,6 @@ location_store(struct mddev *mddev, const char *buf, size_t len)  		if (mddev->bitmap_info.file) {  			struct file *f = mddev->bitmap_info.file;  			mddev->bitmap_info.file = NULL; -			restore_bitmap_write_access(f);  			fput(f);  		}  	} else { diff --git a/drivers/md/bitmap.h 
b/drivers/md/bitmap.h index df4aeb6ac6f..30210b9c4ef 100644 --- a/drivers/md/bitmap.h +++ b/drivers/md/bitmap.h @@ -225,7 +225,7 @@ struct bitmap {  	wait_queue_head_t overflow_wait;  	wait_queue_head_t behind_wait; -	struct sysfs_dirent *sysfs_can_clear; +	struct kernfs_node *sysfs_can_clear;  };  /* the bitmap API */ diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c index 85f0b707425..f752d12081f 100644 --- a/drivers/md/dm-bio-prison.c +++ b/drivers/md/dm-bio-prison.c @@ -14,13 +14,17 @@  /*----------------------------------------------------------------*/ -struct dm_bio_prison { +struct bucket {  	spinlock_t lock; +	struct hlist_head cells; +}; + +struct dm_bio_prison {  	mempool_t *cell_pool;  	unsigned nr_buckets;  	unsigned hash_mask; -	struct hlist_head *cells; +	struct bucket *buckets;  };  /*----------------------------------------------------------------*/ @@ -40,6 +44,12 @@ static uint32_t calc_nr_buckets(unsigned nr_cells)  static struct kmem_cache *_cell_cache; +static void init_bucket(struct bucket *b) +{ +	spin_lock_init(&b->lock); +	INIT_HLIST_HEAD(&b->cells); +} +  /*   * @nr_cells should be the number of cells you want in use _concurrently_.   * Don't confuse it with the number of distinct keys. @@ -49,13 +59,12 @@ struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)  	unsigned i;  	uint32_t nr_buckets = calc_nr_buckets(nr_cells);  	size_t len = sizeof(struct dm_bio_prison) + -		(sizeof(struct hlist_head) * nr_buckets); +		(sizeof(struct bucket) * nr_buckets);  	struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);  	if (!prison)  		return NULL; -	spin_lock_init(&prison->lock);  	prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);  	if (!prison->cell_pool) {  		kfree(prison); @@ -64,9 +73,9 @@ struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)  	prison->nr_buckets = nr_buckets;  	prison->hash_mask = nr_buckets - 1; -	prison->cells = (struct hlist_head *) (prison + 1); +	prison->buckets = (struct bucket *) (prison + 1);  	for (i = 0; i < nr_buckets; i++) -		INIT_HLIST_HEAD(prison->cells + i); +		init_bucket(prison->buckets + i);  	return prison;  } @@ -107,40 +116,44 @@ static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs)  		       (lhs->block == rhs->block);  } -static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket, +static struct bucket *get_bucket(struct dm_bio_prison *prison, +				 struct dm_cell_key *key) +{ +	return prison->buckets + hash_key(prison, key); +} + +static struct dm_bio_prison_cell *__search_bucket(struct bucket *b,  						  struct dm_cell_key *key)  {  	struct dm_bio_prison_cell *cell; -	hlist_for_each_entry(cell, bucket, list) +	hlist_for_each_entry(cell, &b->cells, list)  		if (keys_equal(&cell->key, key))  			return cell;  	return NULL;  } -static void __setup_new_cell(struct dm_bio_prison *prison, +static void __setup_new_cell(struct bucket *b,  			     struct dm_cell_key *key,  			     struct bio *holder, -			     uint32_t hash,  			     struct dm_bio_prison_cell *cell)  {  	memcpy(&cell->key, key, sizeof(cell->key));  	cell->holder = holder;  	bio_list_init(&cell->bios); -	hlist_add_head(&cell->list, prison->cells + hash); +	hlist_add_head(&cell->list, &b->cells);  } -static int __bio_detain(struct dm_bio_prison *prison, +static int __bio_detain(struct bucket *b,  			struct dm_cell_key *key,  			struct bio *inmate,  			struct dm_bio_prison_cell *cell_prealloc,  			struct dm_bio_prison_cell **cell_result)  { -	uint32_t hash = hash_key(prison, key);  	
struct dm_bio_prison_cell *cell; -	cell = __search_bucket(prison->cells + hash, key); +	cell = __search_bucket(b, key);  	if (cell) {  		if (inmate)  			bio_list_add(&cell->bios, inmate); @@ -148,7 +161,7 @@ static int __bio_detain(struct dm_bio_prison *prison,  		return 1;  	} -	__setup_new_cell(prison, key, inmate, hash, cell_prealloc); +	__setup_new_cell(b, key, inmate, cell_prealloc);  	*cell_result = cell_prealloc;  	return 0;  } @@ -161,10 +174,11 @@ static int bio_detain(struct dm_bio_prison *prison,  {  	int r;  	unsigned long flags; +	struct bucket *b = get_bucket(prison, key); -	spin_lock_irqsave(&prison->lock, flags); -	r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result); -	spin_unlock_irqrestore(&prison->lock, flags); +	spin_lock_irqsave(&b->lock, flags); +	r = __bio_detain(b, key, inmate, cell_prealloc, cell_result); +	spin_unlock_irqrestore(&b->lock, flags);  	return r;  } @@ -208,10 +222,11 @@ void dm_cell_release(struct dm_bio_prison *prison,  		     struct bio_list *bios)  {  	unsigned long flags; +	struct bucket *b = get_bucket(prison, &cell->key); -	spin_lock_irqsave(&prison->lock, flags); +	spin_lock_irqsave(&b->lock, flags);  	__cell_release(cell, bios); -	spin_unlock_irqrestore(&prison->lock, flags); +	spin_unlock_irqrestore(&b->lock, flags);  }  EXPORT_SYMBOL_GPL(dm_cell_release); @@ -230,28 +245,25 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,  			       struct bio_list *inmates)  {  	unsigned long flags; +	struct bucket *b = get_bucket(prison, &cell->key); -	spin_lock_irqsave(&prison->lock, flags); +	spin_lock_irqsave(&b->lock, flags);  	__cell_release_no_holder(cell, inmates); -	spin_unlock_irqrestore(&prison->lock, flags); +	spin_unlock_irqrestore(&b->lock, flags);  }  EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);  void dm_cell_error(struct dm_bio_prison *prison, -		   struct dm_bio_prison_cell *cell) +		   struct dm_bio_prison_cell *cell, int error)  {  	struct bio_list bios;  	struct bio *bio; -	unsigned long flags;  	bio_list_init(&bios); - -	spin_lock_irqsave(&prison->lock, flags); -	__cell_release(cell, &bios); -	spin_unlock_irqrestore(&prison->lock, flags); +	dm_cell_release(prison, cell, &bios);  	while ((bio = bio_list_pop(&bios))) -		bio_io_error(bio); +		bio_endio(bio, error);  }  EXPORT_SYMBOL_GPL(dm_cell_error); diff --git a/drivers/md/dm-bio-prison.h b/drivers/md/dm-bio-prison.h index 3f833190ead..6805a142b75 100644 --- a/drivers/md/dm-bio-prison.h +++ b/drivers/md/dm-bio-prison.h @@ -85,7 +85,7 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,  			       struct dm_bio_prison_cell *cell,  			       struct bio_list *inmates);  void dm_cell_error(struct dm_bio_prison *prison, -		   struct dm_bio_prison_cell *cell); +		   struct dm_bio_prison_cell *cell, int error);  /*----------------------------------------------------------------*/ diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h index 3a8cfa2645c..dd364611156 100644 --- a/drivers/md/dm-bio-record.h +++ b/drivers/md/dm-bio-record.h @@ -17,55 +17,24 @@   * original bio state.   
*/ -struct dm_bio_vec_details { -#if PAGE_SIZE < 65536 -	__u16 bv_len; -	__u16 bv_offset; -#else -	unsigned bv_len; -	unsigned bv_offset; -#endif -}; -  struct dm_bio_details { -	sector_t bi_sector;  	struct block_device *bi_bdev; -	unsigned int bi_size; -	unsigned short bi_idx;  	unsigned long bi_flags; -	struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES]; +	struct bvec_iter bi_iter;  };  static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)  { -	unsigned i; - -	bd->bi_sector = bio->bi_sector;  	bd->bi_bdev = bio->bi_bdev; -	bd->bi_size = bio->bi_size; -	bd->bi_idx = bio->bi_idx;  	bd->bi_flags = bio->bi_flags; - -	for (i = 0; i < bio->bi_vcnt; i++) { -		bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len; -		bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset; -	} +	bd->bi_iter = bio->bi_iter;  }  static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)  { -	unsigned i; - -	bio->bi_sector = bd->bi_sector;  	bio->bi_bdev = bd->bi_bdev; -	bio->bi_size = bd->bi_size; -	bio->bi_idx = bd->bi_idx;  	bio->bi_flags = bd->bi_flags; - -	for (i = 0; i < bio->bi_vcnt; i++) { -		bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len; -		bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset; -	} +	bio->bi_iter = bd->bi_iter;  }  #endif diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 173cbb20d10..d724459860d 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -104,6 +104,8 @@ struct dm_bufio_client {  	struct list_head reserved_buffers;  	unsigned need_reserved_buffers; +	unsigned minimum_buffers; +  	struct hlist_head *cache_hash;  	wait_queue_head_t free_buffer_wait; @@ -538,7 +540,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,  	bio_init(&b->bio);  	b->bio.bi_io_vec = b->bio_vec;  	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; -	b->bio.bi_sector = block << b->c->sectors_per_block_bits; +	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;  	b->bio.bi_bdev = b->c->bdev;  	b->bio.bi_end_io = end_io; @@ -605,9 +607,9 @@ static void write_endio(struct bio *bio, int error)  	BUG_ON(!test_bit(B_WRITING, &b->state)); -	smp_mb__before_clear_bit(); +	smp_mb__before_atomic();  	clear_bit(B_WRITING, &b->state); -	smp_mb__after_clear_bit(); +	smp_mb__after_atomic();  	wake_up_bit(&b->state, B_WRITING);  } @@ -861,8 +863,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,  	buffers = dm_bufio_cache_size_per_client >>  		  (c->sectors_per_block_bits + SECTOR_SHIFT); -	if (buffers < DM_BUFIO_MIN_BUFFERS) -		buffers = DM_BUFIO_MIN_BUFFERS; +	if (buffers < c->minimum_buffers) +		buffers = c->minimum_buffers;  	*limit_buffers = buffers;  	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100; @@ -995,9 +997,9 @@ static void read_endio(struct bio *bio, int error)  	BUG_ON(!test_bit(B_READING, &b->state)); -	smp_mb__before_clear_bit(); +	smp_mb__before_atomic();  	clear_bit(B_READING, &b->state); -	smp_mb__after_clear_bit(); +	smp_mb__after_atomic();  	wake_up_bit(&b->state, B_READING);  } @@ -1350,6 +1352,34 @@ retry:  }  EXPORT_SYMBOL_GPL(dm_bufio_release_move); +/* + * Free the given buffer. + * + * This is just a hint, if the buffer is in use or dirty, this function + * does nothing. 
+ */ +void dm_bufio_forget(struct dm_bufio_client *c, sector_t block) +{ +	struct dm_buffer *b; + +	dm_bufio_lock(c); + +	b = __find(c, block); +	if (b && likely(!b->hold_count) && likely(!b->state)) { +		__unlink_buffer(b); +		__free_buffer_wake(b); +	} + +	dm_bufio_unlock(c); +} +EXPORT_SYMBOL(dm_bufio_forget); + +void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n) +{ +	c->minimum_buffers = n; +} +EXPORT_SYMBOL(dm_bufio_set_minimum_buffers); +  unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)  {  	return c->block_size; @@ -1511,7 +1541,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign  	BUG_ON(block_size < 1 << SECTOR_SHIFT ||  	       (block_size & (block_size - 1))); -	c = kmalloc(sizeof(*c), GFP_KERNEL); +	c = kzalloc(sizeof(*c), GFP_KERNEL);  	if (!c) {  		r = -ENOMEM;  		goto bad_client; @@ -1546,6 +1576,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign  	INIT_LIST_HEAD(&c->reserved_buffers);  	c->need_reserved_buffers = reserved_buffers; +	c->minimum_buffers = DM_BUFIO_MIN_BUFFERS; +  	init_waitqueue_head(&c->free_buffer_wait);  	c->async_write_error = 0; @@ -1717,6 +1749,11 @@ static int __init dm_bufio_init(void)  {  	__u64 mem; +	dm_bufio_allocated_kmem_cache = 0; +	dm_bufio_allocated_get_free_pages = 0; +	dm_bufio_allocated_vmalloc = 0; +	dm_bufio_current_allocated = 0; +  	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);  	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names); diff --git a/drivers/md/dm-bufio.h b/drivers/md/dm-bufio.h index b142946a9e3..c096779a729 100644 --- a/drivers/md/dm-bufio.h +++ b/drivers/md/dm-bufio.h @@ -108,6 +108,18 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c);   */  void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block); +/* + * Free the given buffer. + * This is just a hint, if the buffer is in use or dirty, this function + * does nothing. + */ +void dm_bufio_forget(struct dm_bufio_client *c, sector_t block); + +/* + * Set the minimum number of buffers before cleanup happens. + */ +void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n); +  unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);  sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);  sector_t dm_bufio_get_block_number(struct dm_buffer *b); diff --git a/drivers/md/dm-builtin.c b/drivers/md/dm-builtin.c new file mode 100644 index 00000000000..6c9049c51b2 --- /dev/null +++ b/drivers/md/dm-builtin.c @@ -0,0 +1,48 @@ +#include "dm.h" + +/* + * The kobject release method must not be placed in the module itself, + * otherwise we are subject to module unload races. + * + * The release method is called when the last reference to the kobject is + * dropped. It may be called by any other kernel code that drops the last + * reference. + * + * The release method suffers from module unload race. We may prevent the + * module from being unloaded at the start of the release method (using + * increased module reference count or synchronizing against the release + * method), however there is no way to prevent the module from being + * unloaded at the end of the release method. + * + * If this code were placed in the dm module, the following race may + * happen: + *  1. Some other process takes a reference to dm kobject + *  2. The user issues ioctl function to unload the dm device + *  3. dm_sysfs_exit calls kobject_put, however the object is not released + *     because of the other reference taken at step 1 + *  4. 
dm_sysfs_exit waits on the completion + *  5. The other process that took the reference in step 1 drops it, + *     dm_kobject_release is called from this process + *  6. dm_kobject_release calls complete() + *  7. a reschedule happens before dm_kobject_release returns + *  8. dm_sysfs_exit continues, the dm device is unloaded, module reference + *     count is decremented + *  9. The user unloads the dm module + * 10. The other process that was rescheduled in step 7 continues to run, + *     it is now executing code in unloaded module, so it crashes + * + * Note that if the process that takes the foreign reference to dm kobject + * has a low priority and the system is sufficiently loaded with + * higher-priority processes that prevent the low-priority process from + * being scheduled long enough, this bug may really happen. + * + * In order to fix this module unload race, we place the release method + * into a helper code that is compiled directly into the kernel. + */ + +void dm_kobject_release(struct kobject *kobj) +{ +	complete(dm_get_completion_from_kobject(kobj)); +} + +EXPORT_SYMBOL(dm_kobject_release); diff --git a/drivers/md/dm-cache-block-types.h b/drivers/md/dm-cache-block-types.h index bed4ad4e1b7..aac0e2df06b 100644 --- a/drivers/md/dm-cache-block-types.h +++ b/drivers/md/dm-cache-block-types.h @@ -19,7 +19,6 @@  typedef dm_block_t __bitwise__ dm_oblock_t;  typedef uint32_t __bitwise__ dm_cblock_t; -typedef dm_block_t __bitwise__ dm_dblock_t;  static inline dm_oblock_t to_oblock(dm_block_t b)  { @@ -41,14 +40,4 @@ static inline uint32_t from_cblock(dm_cblock_t b)  	return (__force uint32_t) b;  } -static inline dm_dblock_t to_dblock(dm_block_t b) -{ -	return (__force dm_dblock_t) b; -} - -static inline dm_block_t from_dblock(dm_dblock_t b) -{ -	return (__force dm_block_t) b; -} -  #endif /* DM_CACHE_BLOCK_TYPES_H */ diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 1af7255bbff..d2899e7eb3a 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -20,7 +20,13 @@  #define CACHE_SUPERBLOCK_MAGIC 06142003  #define CACHE_SUPERBLOCK_LOCATION 0 -#define CACHE_VERSION 1 + +/* + * defines a range of metadata versions that this module can handle. + */ +#define MIN_CACHE_VERSION 1 +#define MAX_CACHE_VERSION 1 +  #define CACHE_METADATA_CACHE_SIZE 64  /* @@ -103,7 +109,7 @@ struct dm_cache_metadata {  	dm_block_t discard_root;  	sector_t discard_block_size; -	dm_dblock_t discard_nr_blocks; +	dm_oblock_t discard_nr_blocks;  	sector_t data_block_size;  	dm_cblock_t cache_blocks; @@ -114,6 +120,12 @@ struct dm_cache_metadata {  	unsigned policy_version[CACHE_POLICY_VERSION_SIZE];  	size_t policy_hint_size;  	struct dm_cache_statistics stats; + +	/* +	 * Reading the space map root can fail, so we read it into this +	 * buffer before the superblock is locked and updated. 
+	 */ +	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];  };  /*------------------------------------------------------------------- @@ -134,6 +146,18 @@ static void sb_prepare_for_write(struct dm_block_validator *v,  						      SUPERBLOCK_CSUM_XOR));  } +static int check_metadata_version(struct cache_disk_superblock *disk_super) +{ +	uint32_t metadata_version = le32_to_cpu(disk_super->version); +	if (metadata_version < MIN_CACHE_VERSION || metadata_version > MAX_CACHE_VERSION) { +		DMERR("Cache metadata version %u found, but only versions between %u and %u supported.", +		      metadata_version, MIN_CACHE_VERSION, MAX_CACHE_VERSION); +		return -EINVAL; +	} + +	return 0; +} +  static int sb_check(struct dm_block_validator *v,  		    struct dm_block *b,  		    size_t sb_block_size) @@ -164,7 +188,7 @@ static int sb_check(struct dm_block_validator *v,  		return -EILSEQ;  	} -	return 0; +	return check_metadata_version(disk_super);  }  static struct dm_block_validator sb_validator = { @@ -198,7 +222,7 @@ static int superblock_lock(struct dm_cache_metadata *cmd,  /*----------------------------------------------------------------*/ -static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result) +static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)  {  	int r;  	unsigned i; @@ -214,10 +238,10 @@ static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)  		return r;  	data_le = dm_block_data(b); -	*result = 1; +	*result = true;  	for (i = 0; i < sb_block_size; i++) {  		if (data_le[i] != zero) { -			*result = 0; +			*result = false;  			break;  		}  	} @@ -242,11 +266,31 @@ static void __setup_mapping_info(struct dm_cache_metadata *cmd)  	}  } +static int __save_sm_root(struct dm_cache_metadata *cmd) +{ +	int r; +	size_t metadata_len; + +	r = dm_sm_root_size(cmd->metadata_sm, &metadata_len); +	if (r < 0) +		return r; + +	return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root, +			       metadata_len); +} + +static void __copy_sm_root(struct dm_cache_metadata *cmd, +			   struct cache_disk_superblock *disk_super) +{ +	memcpy(&disk_super->metadata_space_map_root, +	       &cmd->metadata_space_map_root, +	       sizeof(cmd->metadata_space_map_root)); +} +  static int __write_initial_superblock(struct dm_cache_metadata *cmd)  {  	int r;  	struct dm_block *sblock; -	size_t metadata_len;  	struct cache_disk_superblock *disk_super;  	sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT; @@ -254,12 +298,16 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)  	if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)  		bdev_size = DM_CACHE_METADATA_MAX_SECTORS; -	r = dm_sm_root_size(cmd->metadata_sm, &metadata_len); +	r = dm_tm_pre_commit(cmd->tm);  	if (r < 0)  		return r; -	r = dm_tm_pre_commit(cmd->tm); -	if (r < 0) +	/* +	 * dm_sm_copy_root() can fail.  So we need to do it before we start +	 * updating the superblock. 
+	 */ +	r = __save_sm_root(cmd); +	if (r)  		return r;  	r = superblock_lock_zero(cmd, &sblock); @@ -270,21 +318,18 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)  	disk_super->flags = 0;  	memset(disk_super->uuid, 0, sizeof(disk_super->uuid));  	disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC); -	disk_super->version = cpu_to_le32(CACHE_VERSION); +	disk_super->version = cpu_to_le32(MAX_CACHE_VERSION);  	memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));  	memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));  	disk_super->policy_hint_size = 0; -	r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root, -			    metadata_len); -	if (r < 0) -		goto bad_locked; +	__copy_sm_root(cmd, disk_super);  	disk_super->mapping_root = cpu_to_le64(cmd->root);  	disk_super->hint_root = cpu_to_le64(cmd->hint_root);  	disk_super->discard_root = cpu_to_le64(cmd->discard_root);  	disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size); -	disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks)); +	disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks));  	disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);  	disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);  	disk_super->cache_blocks = cpu_to_le32(0); @@ -295,10 +340,6 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)  	disk_super->write_misses = cpu_to_le32(0);  	return dm_tm_commit(cmd->tm, sblock); - -bad_locked: -	dm_bm_unlock(sblock); -	return r;  }  static int __format_metadata(struct dm_cache_metadata *cmd) @@ -384,6 +425,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd)  	disk_super = dm_block_data(sblock); +	/* Verify the data block size hasn't changed */ +	if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) { +		DMERR("changing the data block size (from %u to %llu) is not supported", +		      le32_to_cpu(disk_super->data_block_size), +		      (unsigned long long)cmd->data_block_size); +		r = -EINVAL; +		goto bad; +	} +  	r = __check_incompat_features(disk_super, cmd);  	if (r < 0)  		goto bad; @@ -411,7 +461,8 @@ bad:  static int __open_or_format_metadata(struct dm_cache_metadata *cmd,  				     bool format_device)  { -	int r, unformatted; +	int r; +	bool unformatted = false;  	r = __superblock_all_zeroes(cmd->bm, &unformatted);  	if (r) @@ -477,7 +528,7 @@ static void read_superblock_fields(struct dm_cache_metadata *cmd,  	cmd->hint_root = le64_to_cpu(disk_super->hint_root);  	cmd->discard_root = le64_to_cpu(disk_super->discard_root);  	cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size); -	cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks)); +	cmd->discard_nr_blocks = to_oblock(le64_to_cpu(disk_super->discard_nr_blocks));  	cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);  	cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));  	strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name)); @@ -511,8 +562,9 @@ static int __begin_transaction_flags(struct dm_cache_metadata *cmd,  	disk_super = dm_block_data(sblock);  	update_flags(disk_super, mutator);  	read_superblock_fields(cmd, disk_super); +	dm_bm_unlock(sblock); -	return dm_bm_flush_and_unlock(cmd->bm, sblock); +	return dm_bm_flush(cmd->bm);  }  static int __begin_transaction(struct dm_cache_metadata *cmd) @@ -540,7 +592,6 @@ static int 
__commit_transaction(struct dm_cache_metadata *cmd,  				flags_mutator mutator)  {  	int r; -	size_t metadata_len;  	struct cache_disk_superblock *disk_super;  	struct dm_block *sblock; @@ -558,8 +609,8 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,  	if (r < 0)  		return r; -	r = dm_sm_root_size(cmd->metadata_sm, &metadata_len); -	if (r < 0) +	r = __save_sm_root(cmd); +	if (r)  		return r;  	r = superblock_lock(cmd, &sblock); @@ -575,7 +626,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,  	disk_super->hint_root = cpu_to_le64(cmd->hint_root);  	disk_super->discard_root = cpu_to_le64(cmd->discard_root);  	disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size); -	disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks)); +	disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks));  	disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));  	strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));  	disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]); @@ -586,13 +637,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,  	disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);  	disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);  	disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses); - -	r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root, -			    metadata_len); -	if (r < 0) { -		dm_bm_unlock(sblock); -		return r; -	} +	__copy_sm_root(cmd, disk_super);  	return dm_tm_commit(cmd->tm, sblock);  } @@ -666,19 +711,85 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd)  	kfree(cmd);  } +/* + * Checks that the given cache block is either unmapped or clean. 
+ */ +static int block_unmapped_or_clean(struct dm_cache_metadata *cmd, dm_cblock_t b, +				   bool *result) +{ +	int r; +	__le64 value; +	dm_oblock_t ob; +	unsigned flags; + +	r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value); +	if (r) { +		DMERR("block_unmapped_or_clean failed"); +		return r; +	} + +	unpack_value(value, &ob, &flags); +	*result = !((flags & M_VALID) && (flags & M_DIRTY)); + +	return 0; +} + +static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd, +					dm_cblock_t begin, dm_cblock_t end, +					bool *result) +{ +	int r; +	*result = true; + +	while (begin != end) { +		r = block_unmapped_or_clean(cmd, begin, result); +		if (r) +			return r; + +		if (!*result) { +			DMERR("cache block %llu is dirty", +			      (unsigned long long) from_cblock(begin)); +			return 0; +		} + +		begin = to_cblock(from_cblock(begin) + 1); +	} + +	return 0; +} +  int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)  {  	int r; +	bool clean;  	__le64 null_mapping = pack_value(0, 0);  	down_write(&cmd->root_lock);  	__dm_bless_for_disk(&null_mapping); + +	if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) { +		r = blocks_are_unmapped_or_clean(cmd, new_cache_size, cmd->cache_blocks, &clean); +		if (r) { +			__dm_unbless_for_disk(&null_mapping); +			goto out; +		} + +		if (!clean) { +			DMERR("unable to shrink cache due to dirty blocks"); +			r = -EINVAL; +			__dm_unbless_for_disk(&null_mapping); +			goto out; +		} +	} +  	r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),  			    from_cblock(new_cache_size),  			    &null_mapping, &cmd->root);  	if (!r)  		cmd->cache_blocks = new_cache_size;  	cmd->changed = true; + +out:  	up_write(&cmd->root_lock);  	return r; @@ -686,15 +797,15 @@ int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)  int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,  				   sector_t discard_block_size, -				   dm_dblock_t new_nr_entries) +				   dm_oblock_t new_nr_entries)  {  	int r;  	down_write(&cmd->root_lock);  	r = dm_bitset_resize(&cmd->discard_info,  			     cmd->discard_root, -			     from_dblock(cmd->discard_nr_blocks), -			     from_dblock(new_nr_entries), +			     from_oblock(cmd->discard_nr_blocks), +			     from_oblock(new_nr_entries),  			     false, &cmd->discard_root);  	if (!r) {  		cmd->discard_block_size = discard_block_size; @@ -707,28 +818,28 @@ int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,  	return r;  } -static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b) +static int __set_discard(struct dm_cache_metadata *cmd, dm_oblock_t b)  {  	return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root, -				 from_dblock(b), &cmd->discard_root); +				 from_oblock(b), &cmd->discard_root);  } -static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b) +static int __clear_discard(struct dm_cache_metadata *cmd, dm_oblock_t b)  {  	return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root, -				   from_dblock(b), &cmd->discard_root); +				   from_oblock(b), &cmd->discard_root);  } -static int __is_discarded(struct dm_cache_metadata *cmd, dm_dblock_t b, +static int __is_discarded(struct dm_cache_metadata *cmd, dm_oblock_t b,  			  bool *is_discarded)  {  	return dm_bitset_test_bit(&cmd->discard_info, cmd->discard_root, -				  from_dblock(b), &cmd->discard_root, +				  from_oblock(b), &cmd->discard_root,  				  is_discarded);  }  static int __discard(struct dm_cache_metadata 
*cmd, -		     dm_dblock_t dblock, bool discard) +		     dm_oblock_t dblock, bool discard)  {  	int r; @@ -741,7 +852,7 @@ static int __discard(struct dm_cache_metadata *cmd,  }  int dm_cache_set_discard(struct dm_cache_metadata *cmd, -			 dm_dblock_t dblock, bool discard) +			 dm_oblock_t dblock, bool discard)  {  	int r; @@ -759,8 +870,8 @@ static int __load_discards(struct dm_cache_metadata *cmd,  	dm_block_t b;  	bool discard; -	for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) { -		dm_dblock_t dblock = to_dblock(b); +	for (b = 0; b < from_oblock(cmd->discard_nr_blocks); b++) { +		dm_oblock_t dblock = to_oblock(b);  		if (cmd->clean_when_opened) {  			r = __is_discarded(cmd, dblock, &discard); @@ -1143,22 +1254,12 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po  	return 0;  } -int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) +static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, uint32_t hint)  { +	struct dm_cache_metadata *cmd = context; +	__le32 value = cpu_to_le32(hint);  	int r; -	down_write(&cmd->root_lock); -	r = begin_hints(cmd, policy); -	up_write(&cmd->root_lock); - -	return r; -} - -static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock, -		     uint32_t hint) -{ -	int r; -	__le32 value = cpu_to_le32(hint);  	__dm_bless_for_disk(&value);  	r = dm_array_set_value(&cmd->hint_info, cmd->hint_root, @@ -1168,17 +1269,31 @@ static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,  	return r;  } -int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock, -		       uint32_t hint) +static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)  {  	int r; -	if (!hints_array_initialized(cmd)) -		return 0; +	r = begin_hints(cmd, policy); +	if (r) { +		DMERR("begin_hints failed"); +		return r; +	} + +	return policy_walk_mappings(policy, save_hint, cmd); +} + +int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) +{ +	int r;  	down_write(&cmd->root_lock); -	r = save_hint(cmd, cblock, hint); +	r = write_hints(cmd, policy);  	up_write(&cmd->root_lock);  	return r;  } + +int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result) +{ +	return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result); +} diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h index f45cef21f3d..cd70a78623a 100644 --- a/drivers/md/dm-cache-metadata.h +++ b/drivers/md/dm-cache-metadata.h @@ -72,14 +72,14 @@ dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);  int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,  				   sector_t discard_block_size, -				   dm_dblock_t new_nr_entries); +				   dm_oblock_t new_nr_entries);  typedef int (*load_discard_fn)(void *context, sector_t discard_block_size, -			       dm_dblock_t dblock, bool discarded); +			       dm_oblock_t dblock, bool discarded);  int dm_cache_load_discards(struct dm_cache_metadata *cmd,  			   load_discard_fn fn, void *context); -int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard); +int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_oblock_t dblock, bool discard);  int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);  int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock); @@ -128,14 +128,12 @@ void dm_cache_dump(struct dm_cache_metadata *cmd);   * rather than querying the 
policy for each cblock, we let it walk its data   * structures and fill in the hints in whatever order it wishes.   */ - -int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p); +int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);  /* - * requests hints for every cblock and stores in the metadata device. + * Query method.  Are all the blocks in the cache clean?   */ -int dm_cache_save_hint(struct dm_cache_metadata *cmd, -		       dm_cblock_t cblock, uint32_t hint); +int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);  /*----------------------------------------------------------------*/ diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h index 0928abdc49f..2256a1f24f7 100644 --- a/drivers/md/dm-cache-policy-internal.h +++ b/drivers/md/dm-cache-policy-internal.h @@ -61,7 +61,12 @@ static inline int policy_writeback_work(struct dm_cache_policy *p,  static inline void policy_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)  { -	return p->remove_mapping(p, oblock); +	p->remove_mapping(p, oblock); +} + +static inline int policy_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock) +{ +	return p->remove_cblock(p, cblock);  }  static inline void policy_force_mapping(struct dm_cache_policy *p, diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 4296155090b..0e385e40909 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c @@ -26,19 +26,6 @@ static unsigned next_power(unsigned n, unsigned min)  /*----------------------------------------------------------------*/ -static unsigned long *alloc_bitset(unsigned nr_entries) -{ -	size_t s = sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG); -	return vzalloc(s); -} - -static void free_bitset(unsigned long *bits) -{ -	vfree(bits); -} - -/*----------------------------------------------------------------*/ -  /*   * Large, sequential ios are probably better left on the origin device since   * spindles tend to have good bandwidth. @@ -85,7 +72,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t)  static void iot_update_stats(struct io_tracker *t, struct bio *bio)  { -	if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1) +	if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)  		t->nr_seq_samples++;  	else {  		/* @@ -100,7 +87,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio)  		t->nr_rand_samples++;  	} -	t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1); +	t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);  }  static void iot_check_for_pattern_switch(struct io_tracker *t) @@ -151,6 +138,21 @@ static void queue_init(struct queue *q)  }  /* + * Checks to see if the queue is empty. + * FIXME: reduce cpu usage. + */ +static bool queue_empty(struct queue *q) +{ +	unsigned i; + +	for (i = 0; i < NR_QUEUE_LEVELS; i++) +		if (!list_empty(q->qs + i)) +			return false; + +	return true; +} + +/*   * Insert an entry to the back of the given level.   
*/  static void queue_push(struct queue *q, unsigned level, struct list_head *elt) @@ -218,17 +220,115 @@ struct entry {  	struct hlist_node hlist;  	struct list_head list;  	dm_oblock_t oblock; -	dm_cblock_t cblock;	/* valid iff in_cache */  	/*  	 * FIXME: pack these better  	 */ -	bool in_cache:1; +	bool dirty:1;  	unsigned hit_count;  	unsigned generation;  	unsigned tick;  }; +/* + * Rather than storing the cblock in an entry, we allocate all entries in + * an array, and infer the cblock from the entry position. + * + * Free entries are linked together into a list. + */ +struct entry_pool { +	struct entry *entries, *entries_end; +	struct list_head free; +	unsigned nr_allocated; +}; + +static int epool_init(struct entry_pool *ep, unsigned nr_entries) +{ +	unsigned i; + +	ep->entries = vzalloc(sizeof(struct entry) * nr_entries); +	if (!ep->entries) +		return -ENOMEM; + +	ep->entries_end = ep->entries + nr_entries; + +	INIT_LIST_HEAD(&ep->free); +	for (i = 0; i < nr_entries; i++) +		list_add(&ep->entries[i].list, &ep->free); + +	ep->nr_allocated = 0; + +	return 0; +} + +static void epool_exit(struct entry_pool *ep) +{ +	vfree(ep->entries); +} + +static struct entry *alloc_entry(struct entry_pool *ep) +{ +	struct entry *e; + +	if (list_empty(&ep->free)) +		return NULL; + +	e = list_entry(list_pop(&ep->free), struct entry, list); +	INIT_LIST_HEAD(&e->list); +	INIT_HLIST_NODE(&e->hlist); +	ep->nr_allocated++; + +	return e; +} + +/* + * This assumes the cblock hasn't already been allocated. + */ +static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock) +{ +	struct entry *e = ep->entries + from_cblock(cblock); + +	list_del_init(&e->list); +	INIT_HLIST_NODE(&e->hlist); +	ep->nr_allocated++; + +	return e; +} + +static void free_entry(struct entry_pool *ep, struct entry *e) +{ +	BUG_ON(!ep->nr_allocated); +	ep->nr_allocated--; +	INIT_HLIST_NODE(&e->hlist); +	list_add(&e->list, &ep->free); +} + +/* + * Returns NULL if the entry is free. + */ +static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock) +{ +	struct entry *e = ep->entries + from_cblock(cblock); +	return !hlist_unhashed(&e->hlist) ? e : NULL; +} + +static bool epool_empty(struct entry_pool *ep) +{ +	return list_empty(&ep->free); +} + +static bool in_pool(struct entry_pool *ep, struct entry *e) +{ +	return e >= ep->entries && e < ep->entries_end; +} + +static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e) +{ +	return to_cblock(e - ep->entries); +} + +/*----------------------------------------------------------------*/ +  struct mq_policy {  	struct dm_cache_policy policy; @@ -238,13 +338,22 @@ struct mq_policy {  	struct io_tracker tracker;  	/* -	 * We maintain two queues of entries.  The cache proper contains -	 * the currently active mappings.  Whereas the pre_cache tracks -	 * blocks that are being hit frequently and potential candidates -	 * for promotion to the cache. +	 * Entries come from two pools, one of pre-cache entries, and one +	 * for the cache proper. +	 */ +	struct entry_pool pre_cache_pool; +	struct entry_pool cache_pool; + +	/* +	 * We maintain three queues of entries.  The cache proper, +	 * consisting of a clean and dirty queue, contains the currently +	 * active mappings.  Whereas the pre_cache tracks blocks that +	 * are being hit frequently and potential candidates for promotion +	 * to the cache.  	 
*/  	struct queue pre_cache; -	struct queue cache; +	struct queue cache_clean; +	struct queue cache_dirty;  	/*  	 * Keeps track of time, incremented by the core.  We use this to @@ -281,24 +390,9 @@ struct mq_policy {  	 */  	unsigned promote_threshold; -	/* -	 * We need cache_size entries for the cache, and choose to have -	 * cache_size entries for the pre_cache too.  One motivation for -	 * using the same size is to make the hit counts directly -	 * comparable between pre_cache and cache. -	 */ -	unsigned nr_entries; -	unsigned nr_entries_allocated; -	struct list_head free; - -	/* -	 * Cache blocks may be unallocated.  We store this info in a -	 * bitset. -	 */ -	unsigned long *allocation_bitset; -	unsigned nr_cblocks_allocated; -	unsigned find_free_nr_words; -	unsigned find_free_last_word; +	unsigned discard_promote_adjustment; +	unsigned read_promote_adjustment; +	unsigned write_promote_adjustment;  	/*  	 * The hash table allows us to quickly find an entry by origin @@ -309,48 +403,9 @@ struct mq_policy {  	struct hlist_head *table;  }; -/*----------------------------------------------------------------*/ -/* Free/alloc mq cache entry structures. */ -static void takeout_queue(struct list_head *lh, struct queue *q) -{ -	unsigned level; - -	for (level = 0; level < NR_QUEUE_LEVELS; level++) -		list_splice(q->qs + level, lh); -} - -static void free_entries(struct mq_policy *mq) -{ -	struct entry *e, *tmp; - -	takeout_queue(&mq->free, &mq->pre_cache); -	takeout_queue(&mq->free, &mq->cache); - -	list_for_each_entry_safe(e, tmp, &mq->free, list) -		kmem_cache_free(mq_entry_cache, e); -} - -static int alloc_entries(struct mq_policy *mq, unsigned elts) -{ -	unsigned u = mq->nr_entries; - -	INIT_LIST_HEAD(&mq->free); -	mq->nr_entries_allocated = 0; - -	while (u--) { -		struct entry *e = kmem_cache_zalloc(mq_entry_cache, GFP_KERNEL); - -		if (!e) { -			free_entries(mq); -			return -ENOMEM; -		} - - -		list_add(&e->list, &mq->free); -	} - -	return 0; -} +#define DEFAULT_DISCARD_PROMOTE_ADJUSTMENT 1 +#define DEFAULT_READ_PROMOTE_ADJUSTMENT 4 +#define DEFAULT_WRITE_PROMOTE_ADJUSTMENT 8  /*----------------------------------------------------------------*/ @@ -388,96 +443,14 @@ static void hash_remove(struct entry *e)  /*----------------------------------------------------------------*/ -/* - * Allocates a new entry structure.  The memory is allocated in one lump, - * so we just handing it out here.  Returns NULL if all entries have - * already been allocated.  Cannot fail otherwise. - */ -static struct entry *alloc_entry(struct mq_policy *mq) -{ -	struct entry *e; - -	if (mq->nr_entries_allocated >= mq->nr_entries) { -		BUG_ON(!list_empty(&mq->free)); -		return NULL; -	} - -	e = list_entry(list_pop(&mq->free), struct entry, list); -	INIT_LIST_HEAD(&e->list); -	INIT_HLIST_NODE(&e->hlist); - -	mq->nr_entries_allocated++; -	return e; -} - -/*----------------------------------------------------------------*/ - -/* - * Mark cache blocks allocated or not in the bitset. 
- */ -static void alloc_cblock(struct mq_policy *mq, dm_cblock_t cblock) -{ -	BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size)); -	BUG_ON(test_bit(from_cblock(cblock), mq->allocation_bitset)); - -	set_bit(from_cblock(cblock), mq->allocation_bitset); -	mq->nr_cblocks_allocated++; -} - -static void free_cblock(struct mq_policy *mq, dm_cblock_t cblock) -{ -	BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size)); -	BUG_ON(!test_bit(from_cblock(cblock), mq->allocation_bitset)); - -	clear_bit(from_cblock(cblock), mq->allocation_bitset); -	mq->nr_cblocks_allocated--; -} -  static bool any_free_cblocks(struct mq_policy *mq)  { -	return mq->nr_cblocks_allocated < from_cblock(mq->cache_size); +	return !epool_empty(&mq->cache_pool);  } -/* - * Fills result out with a cache block that isn't in use, or return - * -ENOSPC.  This does _not_ mark the cblock as allocated, the caller is - * reponsible for that. - */ -static int __find_free_cblock(struct mq_policy *mq, unsigned begin, unsigned end, -			      dm_cblock_t *result, unsigned *last_word) +static bool any_clean_cblocks(struct mq_policy *mq)  { -	int r = -ENOSPC; -	unsigned w; - -	for (w = begin; w < end; w++) { -		/* -		 * ffz is undefined if no zero exists -		 */ -		if (mq->allocation_bitset[w] != ~0UL) { -			*last_word = w; -			*result = to_cblock((w * BITS_PER_LONG) + ffz(mq->allocation_bitset[w])); -			if (from_cblock(*result) < from_cblock(mq->cache_size)) -				r = 0; - -			break; -		} -	} - -	return r; -} - -static int find_free_cblock(struct mq_policy *mq, dm_cblock_t *result) -{ -	int r; - -	if (!any_free_cblocks(mq)) -		return -ENOSPC; - -	r = __find_free_cblock(mq, mq->find_free_last_word, mq->find_free_nr_words, result, &mq->find_free_last_word); -	if (r == -ENOSPC && mq->find_free_last_word) -		r = __find_free_cblock(mq, 0, mq->find_free_last_word, result, &mq->find_free_last_word); - -	return r; +	return !queue_empty(&mq->cache_clean);  }  /*----------------------------------------------------------------*/ @@ -496,33 +469,35 @@ static unsigned queue_level(struct entry *e)  	return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);  } +static bool in_cache(struct mq_policy *mq, struct entry *e) +{ +	return in_pool(&mq->cache_pool, e); +} +  /*   * Inserts the entry into the pre_cache or the cache.  Ensures the cache - * block is marked as allocated if necc.  Inserts into the hash table.  Sets the - * tick which records when the entry was last moved about. + * block is marked as allocated if necc.  Inserts into the hash table. + * Sets the tick which records when the entry was last moved about.   */  static void push(struct mq_policy *mq, struct entry *e)  {  	e->tick = mq->tick;  	hash_insert(mq, e); -	if (e->in_cache) { -		alloc_cblock(mq, e->cblock); -		queue_push(&mq->cache, queue_level(e), &e->list); -	} else +	if (in_cache(mq, e)) +		queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean, +			   queue_level(e), &e->list); +	else  		queue_push(&mq->pre_cache, queue_level(e), &e->list);  }  /*   * Removes an entry from pre_cache or cache.  Removes from the hash table. - * Frees off the cache block if necc.   
*/  static void del(struct mq_policy *mq, struct entry *e)  {  	queue_remove(&e->list);  	hash_remove(e); -	if (e->in_cache) -		free_cblock(mq, e->cblock);  }  /* @@ -531,14 +506,14 @@ static void del(struct mq_policy *mq, struct entry *e)   */  static struct entry *pop(struct mq_policy *mq, struct queue *q)  { -	struct entry *e = container_of(queue_pop(q), struct entry, list); +	struct entry *e; +	struct list_head *h = queue_pop(q); -	if (e) { -		hash_remove(e); +	if (!h) +		return NULL; -		if (e->in_cache) -			free_cblock(mq, e->cblock); -	} +	e = container_of(h, struct entry, list); +	hash_remove(e);  	return e;  } @@ -556,7 +531,8 @@ static bool updated_this_tick(struct mq_policy *mq, struct entry *e)   * of the entries.   *   * At the moment the threshold is taken by averaging the hit counts of some - * of the entries in the cache (the first 20 entries of the first level). + * of the entries in the cache (the first 20 entries across all levels in + * ascending order, giving preference to the clean entries at each level).   *   * We can be much cleverer than this though.  For example, each promotion   * could bump up the threshold helping to prevent churn.  Much more to do @@ -571,14 +547,21 @@ static void check_generation(struct mq_policy *mq)  	struct list_head *head;  	struct entry *e; -	if ((mq->hit_count >= mq->generation_period) && -	    (mq->nr_cblocks_allocated == from_cblock(mq->cache_size))) { - +	if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {  		mq->hit_count = 0;  		mq->generation++;  		for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) { -			head = mq->cache.qs + level; +			head = mq->cache_clean.qs + level; +			list_for_each_entry(e, head, list) { +				nr++; +				total += e->hit_count; + +				if (++count >= MAX_TO_AVERAGE) +					break; +			} + +			head = mq->cache_dirty.qs + level;  			list_for_each_entry(e, head, list) {  				nr++;  				total += e->hit_count; @@ -631,19 +614,30 @@ static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)   * - set the hit count to a hard coded value other than 1, eg, is it better   *   if it goes in at level 2?   */ -static dm_cblock_t demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock) +static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)  { -	dm_cblock_t result; -	struct entry *demoted = pop(mq, &mq->cache); +	struct entry *demoted = pop(mq, &mq->cache_clean); + +	if (!demoted) +		/* +		 * We could get a block from mq->cache_dirty, but that +		 * would add extra latency to the triggering bio as it +		 * waits for the writeback.  Better to not promote this +		 * time and hope there's a clean block next time this block +		 * is hit. +		 */ +		return -ENOSPC; -	BUG_ON(!demoted); -	result = demoted->cblock;  	*oblock = demoted->oblock; -	demoted->in_cache = false; -	demoted->hit_count = 1; -	push(mq, demoted); +	free_entry(&mq->cache_pool, demoted); -	return result; +	/* +	 * We used to put the demoted block into the pre-cache, but I think +	 * it's simpler to just let it work it's way up from zero again. +	 * Stops blocks flickering in and out of the cache. +	 */ + +	return 0;  }  /* @@ -655,24 +649,21 @@ static dm_cblock_t demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)   * We bias towards reads, since they can be demoted at no cost if they   * haven't been dirtied.   
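The bias described just above is what the reworked adjusted_promote_threshold() in this hunk implements: reads always pay the read adjustment, discarded blocks that need no copy get the very low discard adjustment, and everything else pays the write adjustment on top of the moving promote_threshold. A compact userspace model of that decision, with the struct and parameter names invented for the sketch (the 1/4/8 defaults are the ones defined earlier in the patch):

#include <stdbool.h>
#include <stdio.h>

enum data_dir { DIR_READ, DIR_WRITE };

struct mq_tunables {
	unsigned promote_threshold;
	unsigned discard_promote_adjustment;	/* default 1 */
	unsigned read_promote_adjustment;	/* default 4 */
	unsigned write_promote_adjustment;	/* default 8 */
};

static unsigned adjusted_promote_threshold(const struct mq_tunables *t,
					   bool discarded_oblock,
					   bool free_or_clean_cblocks,
					   enum data_dir dir)
{
	if (dir == DIR_READ)
		return t->promote_threshold + t->read_promote_adjustment;

	if (discarded_oblock && free_or_clean_cblocks)
		/* nothing needs copying, so promote almost immediately */
		return t->discard_promote_adjustment;

	return t->promote_threshold + t->write_promote_adjustment;
}

int main(void)
{
	struct mq_tunables t = { 0, 1, 4, 8 };

	printf("read hit:            %u\n",
	       adjusted_promote_threshold(&t, false, true, DIR_READ));
	printf("write hit:           %u\n",
	       adjusted_promote_threshold(&t, false, true, DIR_WRITE));
	printf("discarded write hit: %u\n",
	       adjusted_promote_threshold(&t, true, true, DIR_WRITE));
	return 0;
}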
*/ -#define DISCARDED_PROMOTE_THRESHOLD 1 -#define READ_PROMOTE_THRESHOLD 4 -#define WRITE_PROMOTE_THRESHOLD 8 -  static unsigned adjusted_promote_threshold(struct mq_policy *mq,  					   bool discarded_oblock, int data_dir)  { -	if (discarded_oblock && any_free_cblocks(mq) && data_dir == WRITE) +	if (data_dir == READ) +		return mq->promote_threshold + mq->read_promote_adjustment; + +	if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {  		/*  		 * We don't need to do any copying at all, so give this a -		 * very low threshold.  In practice this only triggers -		 * during initial population after a format. +		 * very low threshold.  		 */ -		return DISCARDED_PROMOTE_THRESHOLD; +		return mq->discard_promote_adjustment; +	} -	return data_dir == READ ? -		(mq->promote_threshold + READ_PROMOTE_THRESHOLD) : -		(mq->promote_threshold + WRITE_PROMOTE_THRESHOLD); +	return mq->promote_threshold + mq->write_promote_adjustment;  }  static bool should_promote(struct mq_policy *mq, struct entry *e, @@ -688,34 +679,49 @@ static int cache_entry_found(struct mq_policy *mq,  {  	requeue_and_update_tick(mq, e); -	if (e->in_cache) { +	if (in_cache(mq, e)) {  		result->op = POLICY_HIT; -		result->cblock = e->cblock; +		result->cblock = infer_cblock(&mq->cache_pool, e);  	}  	return 0;  }  /* - * Moves and entry from the pre_cache to the cache.  The main work is + * Moves an entry from the pre_cache to the cache.  The main work is   * finding which cache block to use.   */  static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,  			      struct policy_result *result)  { -	dm_cblock_t cblock; +	int r; +	struct entry *new_e; -	if (find_free_cblock(mq, &cblock) == -ENOSPC) { +	/* Ensure there's a free cblock in the cache */ +	if (epool_empty(&mq->cache_pool)) {  		result->op = POLICY_REPLACE; -		cblock = demote_cblock(mq, &result->old_oblock); +		r = demote_cblock(mq, &result->old_oblock); +		if (r) { +			result->op = POLICY_MISS; +			return 0; +		}  	} else  		result->op = POLICY_NEW; -	result->cblock = e->cblock = cblock; +	new_e = alloc_entry(&mq->cache_pool); +	BUG_ON(!new_e); + +	new_e->oblock = e->oblock; +	new_e->dirty = false; +	new_e->hit_count = e->hit_count; +	new_e->generation = e->generation; +	new_e->tick = e->tick;  	del(mq, e); -	e->in_cache = true; -	push(mq, e); +	free_entry(&mq->pre_cache_pool, e); +	push(mq, new_e); + +	result->cblock = infer_cblock(&mq->cache_pool, new_e);  	return 0;  } @@ -727,15 +733,18 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,  	int r = 0;  	bool updated = updated_this_tick(mq, e); -	requeue_and_update_tick(mq, e); -  	if ((!discarded_oblock && updated) || -	    !should_promote(mq, e, discarded_oblock, data_dir)) +	    !should_promote(mq, e, discarded_oblock, data_dir)) { +		requeue_and_update_tick(mq, e);  		result->op = POLICY_MISS; -	else if (!can_migrate) + +	} else if (!can_migrate)  		r = -EWOULDBLOCK; -	else + +	else { +		requeue_and_update_tick(mq, e);  		r = pre_cache_to_cache(mq, e, result); +	}  	return r;  } @@ -743,7 +752,7 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,  static void insert_in_pre_cache(struct mq_policy *mq,  				dm_oblock_t oblock)  { -	struct entry *e = alloc_entry(mq); +	struct entry *e = alloc_entry(&mq->pre_cache_pool);  	if (!e)  		/* @@ -757,7 +766,7 @@ static void insert_in_pre_cache(struct mq_policy *mq,  		return;  	} -	e->in_cache = false; +	e->dirty = false;  	e->oblock = oblock;  	e->hit_count = 1;  	e->generation = mq->generation; @@ 
-767,37 +776,43 @@ static void insert_in_pre_cache(struct mq_policy *mq,  static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,  			    struct policy_result *result)  { +	int r;  	struct entry *e; -	dm_cblock_t cblock; -	if (find_free_cblock(mq, &cblock) == -ENOSPC) { -		result->op = POLICY_MISS; -		insert_in_pre_cache(mq, oblock); -		return; -	} +	if (epool_empty(&mq->cache_pool)) { +		result->op = POLICY_REPLACE; +		r = demote_cblock(mq, &result->old_oblock); +		if (unlikely(r)) { +			result->op = POLICY_MISS; +			insert_in_pre_cache(mq, oblock); +			return; +		} -	e = alloc_entry(mq); -	if (unlikely(!e)) { -		result->op = POLICY_MISS; -		return; +		/* +		 * This will always succeed, since we've just demoted. +		 */ +		e = alloc_entry(&mq->cache_pool); +		BUG_ON(!e); + +	} else { +		e = alloc_entry(&mq->cache_pool); +		result->op = POLICY_NEW;  	}  	e->oblock = oblock; -	e->cblock = cblock; -	e->in_cache = true; +	e->dirty = false;  	e->hit_count = 1;  	e->generation = mq->generation;  	push(mq, e); -	result->op = POLICY_NEW; -	result->cblock = e->cblock; +	result->cblock = infer_cblock(&mq->cache_pool, e);  }  static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,  			  bool can_migrate, bool discarded_oblock,  			  int data_dir, struct policy_result *result)  { -	if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) == 1) { +	if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {  		if (can_migrate)  			insert_in_cache(mq, oblock, result);  		else @@ -821,13 +836,16 @@ static int map(struct mq_policy *mq, dm_oblock_t oblock,  	int r = 0;  	struct entry *e = hash_lookup(mq, oblock); -	if (e && e->in_cache) +	if (e && in_cache(mq, e))  		r = cache_entry_found(mq, e, result); +  	else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)  		result->op = POLICY_MISS; +  	else if (e)  		r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,  					  data_dir, result); +  	else  		r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,  				   data_dir, result); @@ -854,9 +872,9 @@ static void mq_destroy(struct dm_cache_policy *p)  {  	struct mq_policy *mq = to_mq_policy(p); -	free_bitset(mq->allocation_bitset); -	kfree(mq->table); -	free_entries(mq); +	vfree(mq->table); +	epool_exit(&mq->cache_pool); +	epool_exit(&mq->pre_cache_pool);  	kfree(mq);  } @@ -904,8 +922,8 @@ static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t  		return -EWOULDBLOCK;  	e = hash_lookup(mq, oblock); -	if (e && e->in_cache) { -		*cblock = e->cblock; +	if (e && in_cache(mq, e)) { +		*cblock = infer_cblock(&mq->cache_pool, e);  		r = 0;  	} else  		r = -ENOENT; @@ -915,6 +933,36 @@ static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t  	return r;  } +static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set) +{ +	struct entry *e; + +	e = hash_lookup(mq, oblock); +	BUG_ON(!e || !in_cache(mq, e)); + +	del(mq, e); +	e->dirty = set; +	push(mq, e); +} + +static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock) +{ +	struct mq_policy *mq = to_mq_policy(p); + +	mutex_lock(&mq->lock); +	__mq_set_clear_dirty(mq, oblock, true); +	mutex_unlock(&mq->lock); +} + +static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock) +{ +	struct mq_policy *mq = to_mq_policy(p); + +	mutex_lock(&mq->lock); +	__mq_set_clear_dirty(mq, oblock, false); +	mutex_unlock(&mq->lock); +} +  static int mq_load_mapping(struct dm_cache_policy *p,  			   dm_oblock_t oblock, dm_cblock_t 
cblock,  			   uint32_t hint, bool hint_valid) @@ -922,13 +970,9 @@ static int mq_load_mapping(struct dm_cache_policy *p,  	struct mq_policy *mq = to_mq_policy(p);  	struct entry *e; -	e = alloc_entry(mq); -	if (!e) -		return -ENOMEM; - -	e->cblock = cblock; +	e = alloc_particular_entry(&mq->cache_pool, cblock);  	e->oblock = oblock; -	e->in_cache = true; +	e->dirty = false;	/* this gets corrected in a minute */  	e->hit_count = hint_valid ? hint : 1;  	e->generation = mq->generation;  	push(mq, e); @@ -936,57 +980,126 @@ static int mq_load_mapping(struct dm_cache_policy *p,  	return 0;  } +static int mq_save_hints(struct mq_policy *mq, struct queue *q, +			 policy_walk_fn fn, void *context) +{ +	int r; +	unsigned level; +	struct entry *e; + +	for (level = 0; level < NR_QUEUE_LEVELS; level++) +		list_for_each_entry(e, q->qs + level, list) { +			r = fn(context, infer_cblock(&mq->cache_pool, e), +			       e->oblock, e->hit_count); +			if (r) +				return r; +		} + +	return 0; +} +  static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,  			    void *context)  {  	struct mq_policy *mq = to_mq_policy(p);  	int r = 0; -	struct entry *e; -	unsigned level;  	mutex_lock(&mq->lock); -	for (level = 0; level < NR_QUEUE_LEVELS; level++) -		list_for_each_entry(e, &mq->cache.qs[level], list) { -			r = fn(context, e->cblock, e->oblock, e->hit_count); -			if (r) -				goto out; -		} +	r = mq_save_hints(mq, &mq->cache_clean, fn, context); +	if (!r) +		r = mq_save_hints(mq, &mq->cache_dirty, fn, context); -out:  	mutex_unlock(&mq->lock);  	return r;  } +static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock) +{ +	struct entry *e; + +	e = hash_lookup(mq, oblock); +	BUG_ON(!e || !in_cache(mq, e)); + +	del(mq, e); +	free_entry(&mq->cache_pool, e); +} +  static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)  {  	struct mq_policy *mq = to_mq_policy(p); -	struct entry *e;  	mutex_lock(&mq->lock); +	__remove_mapping(mq, oblock); +	mutex_unlock(&mq->lock); +} -	e = hash_lookup(mq, oblock); +static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock) +{ +	struct entry *e = epool_find(&mq->cache_pool, cblock); -	BUG_ON(!e || !e->in_cache); +	if (!e) +		return -ENODATA;  	del(mq, e); -	e->in_cache = false; -	push(mq, e); +	free_entry(&mq->cache_pool, e); + +	return 0; +} + +static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock) +{ +	int r; +	struct mq_policy *mq = to_mq_policy(p); +	mutex_lock(&mq->lock); +	r = __remove_cblock(mq, cblock);  	mutex_unlock(&mq->lock); + +	return r;  } -static void force_mapping(struct mq_policy *mq, -			  dm_oblock_t current_oblock, dm_oblock_t new_oblock) +static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock, +			      dm_cblock_t *cblock)  { -	struct entry *e = hash_lookup(mq, current_oblock); +	struct entry *e = pop(mq, &mq->cache_dirty); -	BUG_ON(!e || !e->in_cache); +	if (!e) +		return -ENODATA; -	del(mq, e); -	e->oblock = new_oblock; +	*oblock = e->oblock; +	*cblock = infer_cblock(&mq->cache_pool, e); +	e->dirty = false;  	push(mq, e); + +	return 0; +} + +static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock, +			     dm_cblock_t *cblock) +{ +	int r; +	struct mq_policy *mq = to_mq_policy(p); + +	mutex_lock(&mq->lock); +	r = __mq_writeback_work(mq, oblock, cblock); +	mutex_unlock(&mq->lock); + +	return r; +} + +static void __force_mapping(struct mq_policy *mq, +			    dm_oblock_t current_oblock, dm_oblock_t new_oblock) +{ +	struct entry *e = hash_lookup(mq, 
current_oblock); + +	if (e && in_cache(mq, e)) { +		del(mq, e); +		e->oblock = new_oblock; +		e->dirty = true; +		push(mq, e); +	}  }  static void mq_force_mapping(struct dm_cache_policy *p, @@ -995,16 +1108,20 @@ static void mq_force_mapping(struct dm_cache_policy *p,  	struct mq_policy *mq = to_mq_policy(p);  	mutex_lock(&mq->lock); -	force_mapping(mq, current_oblock, new_oblock); +	__force_mapping(mq, current_oblock, new_oblock);  	mutex_unlock(&mq->lock);  }  static dm_cblock_t mq_residency(struct dm_cache_policy *p)  { +	dm_cblock_t r;  	struct mq_policy *mq = to_mq_policy(p); -	/* FIXME: lock mutex, not sure we can block here */ -	return to_cblock(mq->nr_cblocks_allocated); +	mutex_lock(&mq->lock); +	r = to_cblock(mq->cache_pool.nr_allocated); +	mutex_unlock(&mq->lock); + +	return r;  }  static void mq_tick(struct dm_cache_policy *p) @@ -1021,20 +1138,28 @@ static int mq_set_config_value(struct dm_cache_policy *p,  			       const char *key, const char *value)  {  	struct mq_policy *mq = to_mq_policy(p); -	enum io_pattern pattern;  	unsigned long tmp; -	if (!strcasecmp(key, "random_threshold")) -		pattern = PATTERN_RANDOM; -	else if (!strcasecmp(key, "sequential_threshold")) -		pattern = PATTERN_SEQUENTIAL; -	else -		return -EINVAL; -  	if (kstrtoul(value, 10, &tmp))  		return -EINVAL; -	mq->tracker.thresholds[pattern] = tmp; +	if (!strcasecmp(key, "random_threshold")) { +		mq->tracker.thresholds[PATTERN_RANDOM] = tmp; + +	} else if (!strcasecmp(key, "sequential_threshold")) { +		mq->tracker.thresholds[PATTERN_SEQUENTIAL] = tmp; + +	} else if (!strcasecmp(key, "discard_promote_adjustment")) +		mq->discard_promote_adjustment = tmp; + +	else if (!strcasecmp(key, "read_promote_adjustment")) +		mq->read_promote_adjustment = tmp; + +	else if (!strcasecmp(key, "write_promote_adjustment")) +		mq->write_promote_adjustment = tmp; + +	else +		return -EINVAL;  	return 0;  } @@ -1044,9 +1169,16 @@ static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsign  	ssize_t sz = 0;  	struct mq_policy *mq = to_mq_policy(p); -	DMEMIT("4 random_threshold %u sequential_threshold %u", +	DMEMIT("10 random_threshold %u " +	       "sequential_threshold %u " +	       "discard_promote_adjustment %u " +	       "read_promote_adjustment %u " +	       "write_promote_adjustment %u",  	       mq->tracker.thresholds[PATTERN_RANDOM], -	       mq->tracker.thresholds[PATTERN_SEQUENTIAL]); +	       mq->tracker.thresholds[PATTERN_SEQUENTIAL], +	       mq->discard_promote_adjustment, +	       mq->read_promote_adjustment, +	       mq->write_promote_adjustment);  	return 0;  } @@ -1057,10 +1189,13 @@ static void init_policy_functions(struct mq_policy *mq)  	mq->policy.destroy = mq_destroy;  	mq->policy.map = mq_map;  	mq->policy.lookup = mq_lookup; +	mq->policy.set_dirty = mq_set_dirty; +	mq->policy.clear_dirty = mq_clear_dirty;  	mq->policy.load_mapping = mq_load_mapping;  	mq->policy.walk_mappings = mq_walk_mappings;  	mq->policy.remove_mapping = mq_remove_mapping; -	mq->policy.writeback_work = NULL; +	mq->policy.remove_cblock = mq_remove_cblock; +	mq->policy.writeback_work = mq_writeback_work;  	mq->policy.force_mapping = mq_force_mapping;  	mq->policy.residency = mq_residency;  	mq->policy.tick = mq_tick; @@ -1072,7 +1207,6 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,  					 sector_t origin_size,  					 sector_t cache_block_size)  { -	int r;  	struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);  	if (!mq) @@ -1080,47 +1214,48 @@ static struct dm_cache_policy 
*mq_create(dm_cblock_t cache_size,  	init_policy_functions(mq);  	iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT); -  	mq->cache_size = cache_size; + +	if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) { +		DMERR("couldn't initialize pool of pre-cache entries"); +		goto bad_pre_cache_init; +	} + +	if (epool_init(&mq->cache_pool, from_cblock(cache_size))) { +		DMERR("couldn't initialize pool of cache entries"); +		goto bad_cache_init; +	} +  	mq->tick_protected = 0;  	mq->tick = 0;  	mq->hit_count = 0;  	mq->generation = 0;  	mq->promote_threshold = 0; +	mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT; +	mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT; +	mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT;  	mutex_init(&mq->lock);  	spin_lock_init(&mq->tick_lock); -	mq->find_free_nr_words = dm_div_up(from_cblock(mq->cache_size), BITS_PER_LONG); -	mq->find_free_last_word = 0;  	queue_init(&mq->pre_cache); -	queue_init(&mq->cache); -	mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U); - -	mq->nr_entries = 2 * from_cblock(cache_size); -	r = alloc_entries(mq, mq->nr_entries); -	if (r) -		goto bad_cache_alloc; +	queue_init(&mq->cache_clean); +	queue_init(&mq->cache_dirty); -	mq->nr_entries_allocated = 0; -	mq->nr_cblocks_allocated = 0; +	mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);  	mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);  	mq->hash_bits = ffs(mq->nr_buckets) - 1; -	mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL); +	mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);  	if (!mq->table)  		goto bad_alloc_table; -	mq->allocation_bitset = alloc_bitset(from_cblock(cache_size)); -	if (!mq->allocation_bitset) -		goto bad_alloc_bitset; -  	return &mq->policy; -bad_alloc_bitset: -	kfree(mq->table);  bad_alloc_table: -	free_entries(mq); -bad_cache_alloc: +	epool_exit(&mq->cache_pool); +bad_cache_init: +	epool_exit(&mq->pre_cache_pool); +bad_pre_cache_init:  	kfree(mq);  	return NULL; @@ -1130,7 +1265,7 @@ bad_cache_alloc:  static struct dm_cache_policy_type mq_policy_type = {  	.name = "mq", -	.version = {1, 0, 0}, +	.version = {1, 2, 0},  	.hint_size = 4,  	.owner = THIS_MODULE,  	.create = mq_create @@ -1138,10 +1273,11 @@ static struct dm_cache_policy_type mq_policy_type = {  static struct dm_cache_policy_type default_policy_type = {  	.name = "default", -	.version = {1, 0, 0}, +	.version = {1, 2, 0},  	.hint_size = 4,  	.owner = THIS_MODULE, -	.create = mq_create +	.create = mq_create, +	.real = &mq_policy_type  };  static int __init mq_init(void) diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c index 21c03c570c0..c1a3cee99b4 100644 --- a/drivers/md/dm-cache-policy.c +++ b/drivers/md/dm-cache-policy.c @@ -119,13 +119,13 @@ struct dm_cache_policy *dm_cache_policy_create(const char *name,  	type = get_policy(name);  	if (!type) {  		DMWARN("unknown policy type"); -		return NULL; +		return ERR_PTR(-EINVAL);  	}  	p = type->create(cache_size, origin_size, cache_block_size);  	if (!p) {  		put_policy(type); -		return NULL; +		return ERR_PTR(-ENOMEM);  	}  	p->private = type; @@ -146,6 +146,10 @@ const char *dm_cache_policy_get_name(struct dm_cache_policy *p)  {  	struct dm_cache_policy_type *t = p->private; +	/* if t->real is set then an alias was used (e.g. 
"default") */ +	if (t->real) +		return t->real->name; +  	return t->name;  }  EXPORT_SYMBOL_GPL(dm_cache_policy_get_name); diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h index 33369ca9614..f50fe360c54 100644 --- a/drivers/md/dm-cache-policy.h +++ b/drivers/md/dm-cache-policy.h @@ -135,9 +135,6 @@ struct dm_cache_policy {  	 */  	int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock); -	/* -	 * oblock must be a mapped block.  Must not block. -	 */  	void (*set_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);  	void (*clear_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock); @@ -159,8 +156,24 @@ struct dm_cache_policy {  	void (*force_mapping)(struct dm_cache_policy *p, dm_oblock_t current_oblock,  			      dm_oblock_t new_oblock); -	int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock); +	/* +	 * This is called via the invalidate_cblocks message.  It is +	 * possible the particular cblock has already been removed due to a +	 * write io in passthrough mode.  In which case this should return +	 * -ENODATA. +	 */ +	int (*remove_cblock)(struct dm_cache_policy *p, dm_cblock_t cblock); +	/* +	 * Provide a dirty block to be written back by the core target. +	 * +	 * Returns: +	 * +	 * 0 and @cblock,@oblock: block to write back provided +	 * +	 * -ENODATA: no dirty blocks available +	 */ +	int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);  	/*  	 * How full is the cache? @@ -210,6 +223,12 @@ struct dm_cache_policy_type {  	unsigned version[CACHE_POLICY_VERSION_SIZE];  	/* +	 * For use by an alias dm_cache_policy_type to point to the +	 * real dm_cache_policy_type. +	 */ +	struct dm_cache_policy_type *real; + +	/*  	 * Policies may store a hint for each each cache block.  	 * Currently the size of this hint must be 0 or 4 bytes but we  	 * expect to relax this in future. diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 29569768ffb..2c63326638b 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -61,6 +61,40 @@ static void free_bitset(unsigned long *bits)  /*----------------------------------------------------------------*/ +/* + * There are a couple of places where we let a bio run, but want to do some + * work before calling its endio function.  We do this by temporarily + * changing the endio fn. + */ +struct dm_hook_info { +	bio_end_io_t *bi_end_io; +	void *bi_private; +}; + +static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio, +			bio_end_io_t *bi_end_io, void *bi_private) +{ +	h->bi_end_io = bio->bi_end_io; +	h->bi_private = bio->bi_private; + +	bio->bi_end_io = bi_end_io; +	bio->bi_private = bi_private; +} + +static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio) +{ +	bio->bi_end_io = h->bi_end_io; +	bio->bi_private = h->bi_private; + +	/* +	 * Must bump bi_remaining to allow bio to complete with +	 * restored bi_end_io. +	 */ +	atomic_inc(&bio->bi_remaining); +} + +/*----------------------------------------------------------------*/ +  #define PRISON_CELLS 1024  #define MIGRATION_POOL_SIZE 128  #define COMMIT_PERIOD HZ @@ -76,14 +110,37 @@ static void free_bitset(unsigned long *bits)  /*   * FIXME: the cache is read/write for the time being.   */ -enum cache_mode { +enum cache_metadata_mode {  	CM_WRITE,		/* metadata may be changed */  	CM_READ_ONLY,		/* metadata may not be changed */  }; +enum cache_io_mode { +	/* +	 * Data is written to cached blocks only.  
These blocks are marked +	 * dirty.  If you lose the cache device you will lose data. +	 * Potential performance increase for both reads and writes. +	 */ +	CM_IO_WRITEBACK, + +	/* +	 * Data is written to both cache and origin.  Blocks are never +	 * dirty.  Potential performance benfit for reads only. +	 */ +	CM_IO_WRITETHROUGH, + +	/* +	 * A degraded mode useful for various cache coherency situations +	 * (eg, rolling back snapshots).  Reads and writes always go to the +	 * origin.  If a write goes to a cached oblock, then the cache +	 * block is invalidated. +	 */ +	CM_IO_PASSTHROUGH +}; +  struct cache_features { -	enum cache_mode mode; -	bool write_through:1; +	enum cache_metadata_mode mode; +	enum cache_io_mode io_mode;  };  struct cache_stats { @@ -99,6 +156,25 @@ struct cache_stats {  	atomic_t discard_count;  }; +/* + * Defines a range of cblocks, begin to (end - 1) are in the range.  end is + * the one-past-the-end value. + */ +struct cblock_range { +	dm_cblock_t begin; +	dm_cblock_t end; +}; + +struct invalidation_request { +	struct list_head list; +	struct cblock_range *cblocks; + +	atomic_t complete; +	int err; + +	wait_queue_head_t result_wait; +}; +  struct cache {  	struct dm_target *ti;  	struct dm_target_callbacks callbacks; @@ -148,18 +224,21 @@ struct cache {  	wait_queue_head_t migration_wait;  	atomic_t nr_migrations; +	wait_queue_head_t quiescing_wait; +	atomic_t quiescing; +	atomic_t quiescing_ack; +  	/*  	 * cache_size entries, dirty if set  	 */ -	dm_cblock_t nr_dirty; +	atomic_t nr_dirty;  	unsigned long *dirty_bitset;  	/*  	 * origin_blocks entries, discarded if set.  	 */ -	dm_dblock_t discard_nr_blocks; +	dm_oblock_t discard_nr_blocks;  	unsigned long *discard_bitset; -	uint32_t discard_block_size; /* a power of 2 times sectors per block */  	/*  	 * Rather than reconstructing the table line for the status we just @@ -186,7 +265,7 @@ struct cache {  	bool need_tick_bio:1;  	bool sized:1; -	bool quiescing:1; +	bool invalidate:1;  	bool commit_requested:1;  	bool loaded_mappings:1;  	bool loaded_discards:1; @@ -197,12 +276,19 @@ struct cache {  	struct cache_features features;  	struct cache_stats stats; + +	/* +	 * Invalidation fields. +	 */ +	spinlock_t invalidation_lock; +	struct list_head invalidation_requests;  };  struct per_bio_data {  	bool tick:1;  	unsigned req_nr:2;  	struct dm_deferred_entry *all_io_entry; +	struct dm_hook_info hook_info;  	/*  	 * writethrough fields.  
These MUST remain at the end of this @@ -211,7 +297,6 @@ struct per_bio_data {  	 */  	struct cache *cache;  	dm_cblock_t cblock; -	bio_end_io_t *saved_bi_end_io;  	struct dm_bio_details bio_details;  }; @@ -228,6 +313,8 @@ struct dm_cache_migration {  	bool writeback:1;  	bool demote:1;  	bool promote:1; +	bool requeue_holder:1; +	bool invalidate:1;  	struct dm_bio_prison_cell *old_ocell;  	struct dm_bio_prison_cell *new_ocell; @@ -405,7 +492,7 @@ static bool is_dirty(struct cache *cache, dm_cblock_t b)  static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)  {  	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) { -		cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1); +		atomic_inc(&cache->nr_dirty);  		policy_set_dirty(cache->policy, oblock);  	}  } @@ -414,8 +501,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl  {  	if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {  		policy_clear_dirty(cache->policy, oblock); -		cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1); -		if (!from_cblock(cache->nr_dirty)) +		if (atomic_dec_return(&cache->nr_dirty) == 0)  			dm_table_event(cache->ti->table);  	}  } @@ -438,48 +524,33 @@ static dm_block_t block_div(dm_block_t b, uint32_t n)  	return b;  } -static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) -{ -	uint32_t discard_blocks = cache->discard_block_size; -	dm_block_t b = from_oblock(oblock); - -	if (!block_size_is_power_of_two(cache)) -		discard_blocks = discard_blocks / cache->sectors_per_block; -	else -		discard_blocks >>= cache->sectors_per_block_shift; - -	b = block_div(b, discard_blocks); - -	return to_dblock(b); -} - -static void set_discard(struct cache *cache, dm_dblock_t b) +static void set_discard(struct cache *cache, dm_oblock_t b)  {  	unsigned long flags;  	atomic_inc(&cache->stats.discard_count);  	spin_lock_irqsave(&cache->lock, flags); -	set_bit(from_dblock(b), cache->discard_bitset); +	set_bit(from_oblock(b), cache->discard_bitset);  	spin_unlock_irqrestore(&cache->lock, flags);  } -static void clear_discard(struct cache *cache, dm_dblock_t b) +static void clear_discard(struct cache *cache, dm_oblock_t b)  {  	unsigned long flags;  	spin_lock_irqsave(&cache->lock, flags); -	clear_bit(from_dblock(b), cache->discard_bitset); +	clear_bit(from_oblock(b), cache->discard_bitset);  	spin_unlock_irqrestore(&cache->lock, flags);  } -static bool is_discarded(struct cache *cache, dm_dblock_t b) +static bool is_discarded(struct cache *cache, dm_oblock_t b)  {  	int r;  	unsigned long flags;  	spin_lock_irqsave(&cache->lock, flags); -	r = test_bit(from_dblock(b), cache->discard_bitset); +	r = test_bit(from_oblock(b), cache->discard_bitset);  	spin_unlock_irqrestore(&cache->lock, flags);  	return r; @@ -491,8 +562,7 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)  	unsigned long flags;  	spin_lock_irqsave(&cache->lock, flags); -	r = test_bit(from_dblock(oblock_to_dblock(cache, b)), -		     cache->discard_bitset); +	r = test_bit(from_oblock(b), cache->discard_bitset);  	spin_unlock_irqrestore(&cache->lock, flags);  	return r; @@ -533,9 +603,24 @@ static void save_stats(struct cache *cache)  #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))  #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data)) +static bool writethrough_mode(struct cache_features *f) +{ +	return f->io_mode == CM_IO_WRITETHROUGH; +} + +static bool writeback_mode(struct cache_features *f) +{ +	return 
f->io_mode == CM_IO_WRITEBACK; +} + +static bool passthrough_mode(struct cache_features *f) +{ +	return f->io_mode == CM_IO_PASSTHROUGH; +} +  static size_t get_per_bio_data_size(struct cache *cache)  { -	return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB; +	return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;  }  static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size) @@ -567,15 +652,18 @@ static void remap_to_origin(struct cache *cache, struct bio *bio)  static void remap_to_cache(struct cache *cache, struct bio *bio,  			   dm_cblock_t cblock)  { -	sector_t bi_sector = bio->bi_sector; +	sector_t bi_sector = bio->bi_iter.bi_sector; +	sector_t block = from_cblock(cblock);  	bio->bi_bdev = cache->cache_dev->bdev;  	if (!block_size_is_power_of_two(cache)) -		bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) + -				sector_div(bi_sector, cache->sectors_per_block); +		bio->bi_iter.bi_sector = +			(block * cache->sectors_per_block) + +			sector_div(bi_sector, cache->sectors_per_block);  	else -		bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) | -				(bi_sector & (cache->sectors_per_block - 1)); +		bio->bi_iter.bi_sector = +			(block << cache->sectors_per_block_shift) | +			(bi_sector & (cache->sectors_per_block - 1));  }  static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) @@ -599,22 +687,23 @@ static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,  	check_if_tick_bio_needed(cache, bio);  	remap_to_origin(cache, bio);  	if (bio_data_dir(bio) == WRITE) -		clear_discard(cache, oblock_to_dblock(cache, oblock)); +		clear_discard(cache, oblock);  }  static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,  				 dm_oblock_t oblock, dm_cblock_t cblock)  { +	check_if_tick_bio_needed(cache, bio);  	remap_to_cache(cache, bio, cblock);  	if (bio_data_dir(bio) == WRITE) {  		set_dirty(cache, oblock, cblock); -		clear_discard(cache, oblock_to_dblock(cache, oblock)); +		clear_discard(cache, oblock);  	}  }  static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)  { -	sector_t block_nr = bio->bi_sector; +	sector_t block_nr = bio->bi_iter.bi_sector;  	if (!block_size_is_power_of_two(cache))  		(void) sector_div(block_nr, cache->sectors_per_block); @@ -662,7 +751,8 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio)  static void writethrough_endio(struct bio *bio, int err)  {  	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); -	bio->bi_end_io = pb->saved_bi_end_io; + +	dm_unhook_bio(&pb->hook_info, bio);  	if (err) {  		bio_endio(bio, err); @@ -693,9 +783,8 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,  	pb->cache = cache;  	pb->cblock = cblock; -	pb->saved_bi_end_io = bio->bi_end_io; +	dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);  	dm_bio_record(&pb->bio_details, bio); -	bio->bi_end_io = writethrough_endio;  	remap_to_origin_clear_discard(pb->cache, bio, oblock);  } @@ -748,8 +837,9 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,  static void cleanup_migration(struct dm_cache_migration *mg)  { -	dec_nr_migrations(mg->cache); +	struct cache *cache = mg->cache;  	free_migration(mg); +	dec_nr_migrations(cache);  }  static void migration_failure(struct dm_cache_migration *mg) @@ -765,13 +855,13 @@ static void migration_failure(struct dm_cache_migration *mg)  		DMWARN_LIMIT("demotion failed; couldn't copy 
block");  		policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock); -		cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1); +		cell_defer(cache, mg->old_ocell, mg->promote ? false : true);  		if (mg->promote) -			cell_defer(cache, mg->new_ocell, 1); +			cell_defer(cache, mg->new_ocell, true);  	} else {  		DMWARN_LIMIT("promotion failed; couldn't copy block");  		policy_remove_mapping(cache->policy, mg->new_oblock); -		cell_defer(cache, mg->new_ocell, 1); +		cell_defer(cache, mg->new_ocell, true);  	}  	cleanup_migration(mg); @@ -823,7 +913,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)  		return;  	} else if (mg->demote) { -		cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1); +		cell_defer(cache, mg->old_ocell, mg->promote ? false : true);  		if (mg->promote) {  			mg->demote = false; @@ -832,11 +922,19 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)  			list_add_tail(&mg->list, &cache->quiesced_migrations);  			spin_unlock_irqrestore(&cache->lock, flags); -		} else +		} else { +			if (mg->invalidate) +				policy_remove_mapping(cache->policy, mg->old_oblock);  			cleanup_migration(mg); +		}  	} else { -		cell_defer(cache, mg->new_ocell, true); +		if (mg->requeue_holder) +			cell_defer(cache, mg->new_ocell, true); +		else { +			bio_endio(mg->new_ocell->holder, 0); +			cell_defer(cache, mg->new_ocell, false); +		}  		clear_dirty(cache, mg->new_oblock, mg->cblock);  		cleanup_migration(mg);  	} @@ -863,12 +961,13 @@ static void issue_copy_real(struct dm_cache_migration *mg)  	int r;  	struct dm_io_region o_region, c_region;  	struct cache *cache = mg->cache; +	sector_t cblock = from_cblock(mg->cblock);  	o_region.bdev = cache->origin_dev->bdev;  	o_region.count = cache->sectors_per_block;  	c_region.bdev = cache->cache_dev->bdev; -	c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block; +	c_region.sector = cblock * cache->sectors_per_block;  	c_region.count = cache->sectors_per_block;  	if (mg->writeback || mg->demote) { @@ -881,8 +980,48 @@ static void issue_copy_real(struct dm_cache_migration *mg)  		r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);  	} -	if (r < 0) +	if (r < 0) { +		DMERR_LIMIT("issuing migration failed");  		migration_failure(mg); +	} +} + +static void overwrite_endio(struct bio *bio, int err) +{ +	struct dm_cache_migration *mg = bio->bi_private; +	struct cache *cache = mg->cache; +	size_t pb_data_size = get_per_bio_data_size(cache); +	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); +	unsigned long flags; + +	dm_unhook_bio(&pb->hook_info, bio); + +	if (err) +		mg->err = true; + +	mg->requeue_holder = false; + +	spin_lock_irqsave(&cache->lock, flags); +	list_add_tail(&mg->list, &cache->completed_migrations); +	spin_unlock_irqrestore(&cache->lock, flags); + +	wake_worker(cache); +} + +static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio) +{ +	size_t pb_data_size = get_per_bio_data_size(mg->cache); +	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); + +	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg); +	remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock); +	generic_make_request(bio); +} + +static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) +{ +	return (bio_data_dir(bio) == WRITE) && +		(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));  }  static void avoid_copy(struct dm_cache_migration *mg) @@ -899,9 +1038,17 @@ static void 
issue_copy(struct dm_cache_migration *mg)  	if (mg->writeback || mg->demote)  		avoid = !is_dirty(cache, mg->cblock) ||  			is_discarded_oblock(cache, mg->old_oblock); -	else +	else { +		struct bio *bio = mg->new_ocell->holder; +  		avoid = is_discarded_oblock(cache, mg->new_oblock); +		if (!avoid && bio_writes_complete_block(cache, bio)) { +			issue_overwrite(mg, bio); +			return; +		} +	} +  	avoid ? avoid_copy(mg) : issue_copy_real(mg);  } @@ -991,6 +1138,8 @@ static void promote(struct cache *cache, struct prealloc *structs,  	mg->writeback = false;  	mg->demote = false;  	mg->promote = true; +	mg->requeue_holder = true; +	mg->invalidate = false;  	mg->cache = cache;  	mg->new_oblock = oblock;  	mg->cblock = cblock; @@ -1012,6 +1161,8 @@ static void writeback(struct cache *cache, struct prealloc *structs,  	mg->writeback = true;  	mg->demote = false;  	mg->promote = false; +	mg->requeue_holder = true; +	mg->invalidate = false;  	mg->cache = cache;  	mg->old_oblock = oblock;  	mg->cblock = cblock; @@ -1035,6 +1186,8 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,  	mg->writeback = false;  	mg->demote = true;  	mg->promote = true; +	mg->requeue_holder = true; +	mg->invalidate = false;  	mg->cache = cache;  	mg->old_oblock = old_oblock;  	mg->new_oblock = new_oblock; @@ -1047,6 +1200,33 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,  	quiesce_migration(mg);  } +/* + * Invalidate a cache entry.  No writeback occurs; any changes in the cache + * block are thrown away. + */ +static void invalidate(struct cache *cache, struct prealloc *structs, +		       dm_oblock_t oblock, dm_cblock_t cblock, +		       struct dm_bio_prison_cell *cell) +{ +	struct dm_cache_migration *mg = prealloc_get_migration(structs); + +	mg->err = false; +	mg->writeback = false; +	mg->demote = true; +	mg->promote = false; +	mg->requeue_holder = true; +	mg->invalidate = true; +	mg->cache = cache; +	mg->old_oblock = oblock; +	mg->cblock = cblock; +	mg->old_ocell = cell; +	mg->new_ocell = NULL; +	mg->start_jiffies = jiffies; + +	inc_nr_migrations(cache); +	quiesce_migration(mg); +} +  /*----------------------------------------------------------------   * bio processing   *--------------------------------------------------------------*/ @@ -1066,7 +1246,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)  	size_t pb_data_size = get_per_bio_data_size(cache);  	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); -	BUG_ON(bio->bi_size); +	BUG_ON(bio->bi_iter.bi_size);  	if (!pb->req_nr)  		remap_to_origin(cache, bio);  	else @@ -1089,15 +1269,15 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)   */  static void process_discard_bio(struct cache *cache, struct bio *bio)  { -	dm_block_t start_block = dm_sector_div_up(bio->bi_sector, -						  cache->discard_block_size); -	dm_block_t end_block = bio->bi_sector + bio_sectors(bio); +	dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector, +						  cache->sectors_per_block); +	dm_block_t end_block = bio_end_sector(bio);  	dm_block_t b; -	end_block = block_div(end_block, cache->discard_block_size); +	end_block = block_div(end_block, cache->sectors_per_block);  	for (b = start_block; b < end_block; b++) -		set_discard(cache, to_dblock(b)); +		set_discard(cache, to_oblock(b));  	bio_endio(bio, 0);  } @@ -1109,13 +1289,6 @@ static bool spare_migration_bandwidth(struct cache *cache)  	return current_volume < cache->migration_threshold;  } -static bool 
is_writethrough_io(struct cache *cache, struct bio *bio, -			       dm_cblock_t cblock) -{ -	return bio_data_dir(bio) == WRITE && -		cache->features.write_through && !is_dirty(cache, cblock); -} -  static void inc_hit_counter(struct cache *cache, struct bio *bio)  {  	atomic_inc(bio_data_dir(bio) == READ ? @@ -1128,6 +1301,15 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)  		   &cache->stats.read_miss : &cache->stats.write_miss);  } +static void issue_cache_bio(struct cache *cache, struct bio *bio, +			    struct per_bio_data *pb, +			    dm_oblock_t oblock, dm_cblock_t cblock) +{ +	pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); +	remap_to_cache_dirty(cache, bio, oblock, cblock); +	issue(cache, bio); +} +  static void process_bio(struct cache *cache, struct prealloc *structs,  			struct bio *bio)  { @@ -1139,7 +1321,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,  	size_t pb_data_size = get_per_bio_data_size(cache);  	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);  	bool discarded_block = is_discarded_oblock(cache, block); -	bool can_migrate = discarded_block || spare_migration_bandwidth(cache); +	bool passthrough = passthrough_mode(&cache->features); +	bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));  	/*  	 * Check to see if that block is currently migrating. @@ -1160,15 +1343,39 @@ static void process_bio(struct cache *cache, struct prealloc *structs,  	switch (lookup_result.op) {  	case POLICY_HIT: -		inc_hit_counter(cache, bio); -		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); +		if (passthrough) { +			inc_miss_counter(cache, bio); -		if (is_writethrough_io(cache, bio, lookup_result.cblock)) -			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); -		else -			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); +			/* +			 * Passthrough always maps to the origin, +			 * invalidating any cache blocks that are written +			 * to. 
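The passthrough comment above, together with the POLICY_HIT handling reworked throughout process_bio() and cache_map() in this patch, reduces to a per-mode routing decision for a cache hit. The condensed model below uses invented enum and function names and is a sketch of the observable behaviour, not the kernel code:

#include <stdbool.h>
#include <stdio.h>

enum io_mode { IO_WRITEBACK, IO_WRITETHROUGH, IO_PASSTHROUGH };

enum hit_action {
	REMAP_TO_CACHE_DIRTY,		/* serve from cache; writes mark the block dirty */
	REMAP_TO_ORIGIN_THEN_CACHE,	/* write the origin first, then update the cache copy */
	REMAP_TO_ORIGIN,		/* bypass the cache entirely */
	INVALIDATE_THEN_ORIGIN		/* drop the stale cache block, then write the origin */
};

static enum hit_action map_cache_hit(enum io_mode mode, bool is_write, bool block_dirty)
{
	switch (mode) {
	case IO_PASSTHROUGH:
		/* reads go to the origin; a write hit must invalidate first */
		return is_write ? INVALIDATE_THEN_ORIGIN : REMAP_TO_ORIGIN;

	case IO_WRITETHROUGH:
		/* clean write hits are mirrored to origin and cache */
		if (is_write && !block_dirty)
			return REMAP_TO_ORIGIN_THEN_CACHE;
		return REMAP_TO_CACHE_DIRTY;

	case IO_WRITEBACK:
	default:
		return REMAP_TO_CACHE_DIRTY;
	}
}

int main(void)
{
	printf("passthrough write hit  -> %d\n", map_cache_hit(IO_PASSTHROUGH, true, false));
	printf("writethrough clean hit -> %d\n", map_cache_hit(IO_WRITETHROUGH, true, false));
	printf("writeback read hit     -> %d\n", map_cache_hit(IO_WRITEBACK, false, true));
	return 0;
}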
+			 */ + +			if (bio_data_dir(bio) == WRITE) { +				atomic_inc(&cache->stats.demotion); +				invalidate(cache, structs, block, lookup_result.cblock, new_ocell); +				release_cell = false; + +			} else { +				/* FIXME: factor out issue_origin() */ +				pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); +				remap_to_origin_clear_discard(cache, bio, block); +				issue(cache, bio); +			} +		} else { +			inc_hit_counter(cache, bio); + +			if (bio_data_dir(bio) == WRITE && +			    writethrough_mode(&cache->features) && +			    !is_dirty(cache, lookup_result.cblock)) { +				pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); +				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); +				issue(cache, bio); +			} else +				issue_cache_bio(cache, bio, pb, block, lookup_result.cblock); +		} -		issue(cache, bio);  		break;  	case POLICY_MISS: @@ -1227,15 +1434,17 @@ static int need_commit_due_to_time(struct cache *cache)  static int commit_if_needed(struct cache *cache)  { -	if (dm_cache_changed_this_transaction(cache->cmd) && -	    (cache->commit_requested || need_commit_due_to_time(cache))) { +	int r = 0; + +	if ((cache->commit_requested || need_commit_due_to_time(cache)) && +	    dm_cache_changed_this_transaction(cache->cmd)) {  		atomic_inc(&cache->stats.commit_count); -		cache->last_commit_jiffies = jiffies;  		cache->commit_requested = false; -		return dm_cache_commit(cache->cmd, false); +		r = dm_cache_commit(cache->cmd, false); +		cache->last_commit_jiffies = jiffies;  	} -	return 0; +	return r;  }  static void process_deferred_bios(struct cache *cache) @@ -1344,36 +1553,88 @@ static void writeback_some_dirty_blocks(struct cache *cache)  }  /*---------------------------------------------------------------- - * Main worker loop + * Invalidations. + * Dropping something from the cache *without* writing back.   
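The invalidation machinery introduced below walks a half-open cblock range and treats blocks that are already unmapped as success. The following self-contained model captures that loop with a toy mapped-set standing in for the policy; the metadata update (dm_cache_remove_mapping) is elided and all names here are simplified or invented for the sketch:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CBLOCKS 64u

static bool mapped[NR_CBLOCKS];		/* toy stand-in for the policy's mappings */

/* stand-in for policy_remove_cblock(): -ENODATA if nothing is mapped there */
static int toy_remove_cblock(uint64_t cblock)
{
	if (!mapped[cblock])
		return -ENODATA;

	mapped[cblock] = false;
	return 0;
}

/* invalidate the half-open range [begin, end) */
static int invalidate_cblock_range(uint64_t begin, uint64_t end)
{
	int r = 0;

	while (begin != end) {
		r = toy_remove_cblock(begin);
		if (r == -ENODATA)
			r = 0;		/* harmless, already unmapped */
		else if (r)
			break;		/* real failure: stop and report it */

		begin++;
	}

	return r;
}

int main(void)
{
	mapped[3] = mapped[5] = true;
	printf("invalidate [0, 8) -> %d\n", invalidate_cblock_range(0, 8));
	return 0;
}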
*--------------------------------------------------------------*/ -static void start_quiescing(struct cache *cache) + +static void process_invalidation_request(struct cache *cache, struct invalidation_request *req)  { -	unsigned long flags; +	int r = 0; +	uint64_t begin = from_cblock(req->cblocks->begin); +	uint64_t end = from_cblock(req->cblocks->end); -	spin_lock_irqsave(&cache->lock, flags); -	cache->quiescing = 1; -	spin_unlock_irqrestore(&cache->lock, flags); +	while (begin != end) { +		r = policy_remove_cblock(cache->policy, to_cblock(begin)); +		if (!r) { +			r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin)); +			if (r) +				break; + +		} else if (r == -ENODATA) { +			/* harmless, already unmapped */ +			r = 0; + +		} else { +			DMERR("policy_remove_cblock failed"); +			break; +		} + +		begin++; +        } + +	cache->commit_requested = true; + +	req->err = r; +	atomic_set(&req->complete, 1); + +	wake_up(&req->result_wait);  } -static void stop_quiescing(struct cache *cache) +static void process_invalidation_requests(struct cache *cache)  { -	unsigned long flags; +	struct list_head list; +	struct invalidation_request *req, *tmp; -	spin_lock_irqsave(&cache->lock, flags); -	cache->quiescing = 0; -	spin_unlock_irqrestore(&cache->lock, flags); +	INIT_LIST_HEAD(&list); +	spin_lock(&cache->invalidation_lock); +	list_splice_init(&cache->invalidation_requests, &list); +	spin_unlock(&cache->invalidation_lock); + +	list_for_each_entry_safe (req, tmp, &list, list) +		process_invalidation_request(cache, req);  } +/*---------------------------------------------------------------- + * Main worker loop + *--------------------------------------------------------------*/  static bool is_quiescing(struct cache *cache)  { -	int r; -	unsigned long flags; +	return atomic_read(&cache->quiescing); +} -	spin_lock_irqsave(&cache->lock, flags); -	r = cache->quiescing; -	spin_unlock_irqrestore(&cache->lock, flags); +static void ack_quiescing(struct cache *cache) +{ +	if (is_quiescing(cache)) { +		atomic_inc(&cache->quiescing_ack); +		wake_up(&cache->quiescing_wait); +	} +} -	return r; +static void wait_for_quiescing_ack(struct cache *cache) +{ +	wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack)); +} + +static void start_quiescing(struct cache *cache) +{ +	atomic_inc(&cache->quiescing); +	wait_for_quiescing_ack(cache); +} + +static void stop_quiescing(struct cache *cache) +{ +	atomic_set(&cache->quiescing, 0); +	atomic_set(&cache->quiescing_ack, 0);  }  static void wait_for_migrations(struct cache *cache) @@ -1412,7 +1673,8 @@ static int more_work(struct cache *cache)  			!bio_list_empty(&cache->deferred_writethrough_bios) ||  			!list_empty(&cache->quiesced_migrations) ||  			!list_empty(&cache->completed_migrations) || -			!list_empty(&cache->need_commit_migrations); +			!list_empty(&cache->need_commit_migrations) || +			cache->invalidate;  }  static void do_worker(struct work_struct *ws) @@ -1420,16 +1682,16 @@ static void do_worker(struct work_struct *ws)  	struct cache *cache = container_of(ws, struct cache, worker);  	do { -		if (!is_quiescing(cache)) +		if (!is_quiescing(cache)) { +			writeback_some_dirty_blocks(cache); +			process_deferred_writethrough_bios(cache);  			process_deferred_bios(cache); +			process_invalidation_requests(cache); +		}  		process_migrations(cache, &cache->quiesced_migrations, issue_copy);  		process_migrations(cache, &cache->completed_migrations, complete_migration); -		writeback_some_dirty_blocks(cache); - -		
process_deferred_writethrough_bios(cache); -  		if (commit_if_needed(cache)) {  			process_deferred_flush_bios(cache, false); @@ -1442,6 +1704,9 @@ static void do_worker(struct work_struct *ws)  			process_migrations(cache, &cache->need_commit_migrations,  					   migration_success_post_commit);  		} + +		ack_quiescing(cache); +  	} while (more_work(cache));  } @@ -1715,7 +1980,7 @@ static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,  static void init_features(struct cache_features *cf)  {  	cf->mode = CM_WRITE; -	cf->write_through = false; +	cf->io_mode = CM_IO_WRITEBACK;  }  static int parse_features(struct cache_args *ca, struct dm_arg_set *as, @@ -1740,10 +2005,13 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,  		arg = dm_shift_arg(as);  		if (!strcasecmp(arg, "writeback")) -			cf->write_through = false; +			cf->io_mode = CM_IO_WRITEBACK;  		else if (!strcasecmp(arg, "writethrough")) -			cf->write_through = true; +			cf->io_mode = CM_IO_WRITETHROUGH; + +		else if (!strcasecmp(arg, "passthrough")) +			cf->io_mode = CM_IO_PASSTHROUGH;  		else {  			*error = "Unrecognised cache feature requested"; @@ -1872,47 +2140,19 @@ static int set_config_values(struct cache *cache, int argc, const char **argv)  static int create_cache_policy(struct cache *cache, struct cache_args *ca,  			       char **error)  { -	cache->policy =	dm_cache_policy_create(ca->policy_name, -					       cache->cache_size, -					       cache->origin_sectors, -					       cache->sectors_per_block); -	if (!cache->policy) { +	struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name, +							   cache->cache_size, +							   cache->origin_sectors, +							   cache->sectors_per_block); +	if (IS_ERR(p)) {  		*error = "Error creating cache's policy"; -		return -ENOMEM; +		return PTR_ERR(p);  	} +	cache->policy = p;  	return 0;  } -/* - * We want the discard block size to be a power of two, at least the size - * of the cache block size, and have no more than 2^14 discard blocks - * across the origin. 
- */ -#define MAX_DISCARD_BLOCKS (1 << 14) - -static bool too_many_discard_blocks(sector_t discard_block_size, -				    sector_t origin_size) -{ -	(void) sector_div(origin_size, discard_block_size); - -	return origin_size > MAX_DISCARD_BLOCKS; -} - -static sector_t calculate_discard_block_size(sector_t cache_block_size, -					     sector_t origin_size) -{ -	sector_t discard_block_size; - -	discard_block_size = roundup_pow_of_two(cache_block_size); - -	if (origin_size) -		while (too_many_discard_blocks(discard_block_size, origin_size)) -			discard_block_size *= 2; - -	return discard_block_size; -} -  #define DEFAULT_MIGRATION_THRESHOLD 2048  static int cache_create(struct cache_args *ca, struct cache **result) @@ -1937,6 +2177,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)  	ti->num_discard_bios = 1;  	ti->discards_supported = true;  	ti->discard_zeroes_data_unsupported = true; +	/* Discard bios must be split on a block boundary */ +	ti->split_discard_bios = true;  	cache->features = ca->features;  	ti->per_bio_data_size = get_per_bio_data_size(cache); @@ -1995,6 +2237,22 @@ static int cache_create(struct cache_args *ca, struct cache **result)  	}  	cache->cmd = cmd; +	if (passthrough_mode(&cache->features)) { +		bool all_clean; + +		r = dm_cache_metadata_all_clean(cache->cmd, &all_clean); +		if (r) { +			*error = "dm_cache_metadata_all_clean() failed"; +			goto bad; +		} + +		if (!all_clean) { +			*error = "Cannot enter passthrough mode unless all blocks are clean"; +			r = -EINVAL; +			goto bad; +		} +	} +  	spin_lock_init(&cache->lock);  	bio_list_init(&cache->deferred_bios);  	bio_list_init(&cache->deferred_flush_bios); @@ -2005,8 +2263,12 @@ static int cache_create(struct cache_args *ca, struct cache **result)  	atomic_set(&cache->nr_migrations, 0);  	init_waitqueue_head(&cache->migration_wait); +	init_waitqueue_head(&cache->quiescing_wait); +	atomic_set(&cache->quiescing, 0); +	atomic_set(&cache->quiescing_ack, 0); +  	r = -ENOMEM; -	cache->nr_dirty = 0; +	atomic_set(&cache->nr_dirty, 0);  	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));  	if (!cache->dirty_bitset) {  		*error = "could not allocate dirty bitset"; @@ -2014,16 +2276,13 @@ static int cache_create(struct cache_args *ca, struct cache **result)  	}  	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); -	cache->discard_block_size = -		calculate_discard_block_size(cache->sectors_per_block, -					     cache->origin_sectors); -	cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks); -	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); +	cache->discard_nr_blocks = cache->origin_blocks; +	cache->discard_bitset = alloc_bitset(from_oblock(cache->discard_nr_blocks));  	if (!cache->discard_bitset) {  		*error = "could not allocate discard bitset";  		goto bad;  	} -	clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); +	clear_bitset(cache->discard_bitset, from_oblock(cache->discard_nr_blocks));  	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);  	if (IS_ERR(cache->copier)) { @@ -2064,7 +2323,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)  	cache->need_tick_bio = true;  	cache->sized = false; -	cache->quiescing = false; +	cache->invalidate = false;  	cache->commit_requested = false;  	cache->loaded_mappings = false;  	cache->loaded_discards = false; @@ -2078,6 +2337,9 @@ static int cache_create(struct cache_args *ca, struct cache **result)  	
atomic_set(&cache->stats.commit_count, 0);  	atomic_set(&cache->stats.discard_count, 0); +	spin_lock_init(&cache->invalidation_lock); +	INIT_LIST_HEAD(&cache->invalidation_requests); +  	*result = cache;  	return 0; @@ -2155,20 +2417,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio)  	bool discarded_block;  	struct dm_bio_prison_cell *cell;  	struct policy_result lookup_result; -	struct per_bio_data *pb; +	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size); -	if (from_oblock(block) > from_oblock(cache->origin_blocks)) { +	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {  		/*  		 * This can only occur if the io goes to a partial block at  		 * the end of the origin device.  We don't cache these.  		 * Just remap to the origin and carry on.  		 */ -		remap_to_origin_clear_discard(cache, bio, block); +		remap_to_origin(cache, bio);  		return DM_MAPIO_REMAPPED;  	} -	pb = init_per_bio_data(bio, pb_data_size); -  	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {  		defer_bio(cache, bio);  		return DM_MAPIO_SUBMITTED; @@ -2207,17 +2467,38 @@ static int cache_map(struct dm_target *ti, struct bio *bio)  		return DM_MAPIO_SUBMITTED;  	} +	r = DM_MAPIO_REMAPPED;  	switch (lookup_result.op) {  	case POLICY_HIT: -		inc_hit_counter(cache, bio); -		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); +		if (passthrough_mode(&cache->features)) { +			if (bio_data_dir(bio) == WRITE) { +				/* +				 * We need to invalidate this block, so +				 * defer for the worker thread. +				 */ +				cell_defer(cache, cell, true); +				r = DM_MAPIO_SUBMITTED; + +			} else { +				pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); +				inc_miss_counter(cache, bio); +				remap_to_origin_clear_discard(cache, bio, block); + +				cell_defer(cache, cell, false); +			} -		if (is_writethrough_io(cache, bio, lookup_result.cblock)) -			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); -		else -			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); +		} else { +			inc_hit_counter(cache, bio); +			pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); -		cell_defer(cache, cell, false); +			if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && +			    !is_dirty(cache, lookup_result.cblock)) +				remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); +			else +				remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); + +			cell_defer(cache, cell, false); +		}  		break;  	case POLICY_MISS: @@ -2242,10 +2523,10 @@ static int cache_map(struct dm_target *ti, struct bio *bio)  		DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,  			    (unsigned) lookup_result.op);  		bio_io_error(bio); -		return DM_MAPIO_SUBMITTED; +		r = DM_MAPIO_SUBMITTED;  	} -	return DM_MAPIO_REMAPPED; +	return r;  }  static int cache_end_io(struct dm_target *ti, struct bio *bio, int error) @@ -2286,16 +2567,16 @@ static int write_discard_bitset(struct cache *cache)  {  	unsigned i, r; -	r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, -					   cache->discard_nr_blocks); +	r = dm_cache_discard_bitset_resize(cache->cmd, cache->sectors_per_block, +					   cache->origin_blocks);  	if (r) {  		DMERR("could not resize on-disk discard bitset");  		return r;  	} -	for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) { -		r = dm_cache_set_discard(cache->cmd, to_dblock(i), -					 is_discarded(cache, to_dblock(i))); +	for (i = 0; i < from_oblock(cache->discard_nr_blocks); i++) { +		
r = dm_cache_set_discard(cache->cmd, to_oblock(i), +					 is_discarded(cache, to_oblock(i)));  		if (r)  			return r;  	} @@ -2303,30 +2584,6 @@ static int write_discard_bitset(struct cache *cache)  	return 0;  } -static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, -		     uint32_t hint) -{ -	struct cache *cache = context; -	return dm_cache_save_hint(cache->cmd, cblock, hint); -} - -static int write_hints(struct cache *cache) -{ -	int r; - -	r = dm_cache_begin_hints(cache->cmd, cache->policy); -	if (r) { -		DMERR("dm_cache_begin_hints failed"); -		return r; -	} - -	r = policy_walk_mappings(cache->policy, save_hint, cache); -	if (r) -		DMERR("policy_walk_mappings failed"); - -	return r; -} -  /*   * returns true on success   */ @@ -2344,7 +2601,7 @@ static bool sync_metadata(struct cache *cache)  	save_stats(cache); -	r3 = write_hints(cache); +	r3 = dm_cache_write_hints(cache->cmd, cache->policy);  	if (r3)  		DMERR("could not write hints"); @@ -2392,16 +2649,56 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,  }  static int load_discard(void *context, sector_t discard_block_size, -			dm_dblock_t dblock, bool discard) +			dm_oblock_t oblock, bool discard)  {  	struct cache *cache = context; -	/* FIXME: handle mis-matched block size */ -  	if (discard) -		set_discard(cache, dblock); +		set_discard(cache, oblock);  	else -		clear_discard(cache, dblock); +		clear_discard(cache, oblock); + +	return 0; +} + +static dm_cblock_t get_cache_dev_size(struct cache *cache) +{ +	sector_t size = get_dev_size(cache->cache_dev); +	(void) sector_div(size, cache->sectors_per_block); +	return to_cblock(size); +} + +static bool can_resize(struct cache *cache, dm_cblock_t new_size) +{ +	if (from_cblock(new_size) > from_cblock(cache->cache_size)) +		return true; + +	/* +	 * We can't drop a dirty block when shrinking the cache. +	 */ +	while (from_cblock(new_size) < from_cblock(cache->cache_size)) { +		new_size = to_cblock(from_cblock(new_size) + 1); +		if (is_dirty(cache, new_size)) { +			DMERR("unable to shrink cache; cache block %llu is dirty", +			      (unsigned long long) from_cblock(new_size)); +			return false; +		} +	} + +	return true; +} + +static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size) +{ +	int r; + +	r = dm_cache_resize(cache->cmd, new_size); +	if (r) { +		DMERR("could not resize cache metadata"); +		return r; +	} + +	cache->cache_size = new_size;  	return 0;  } @@ -2410,22 +2707,25 @@ static int cache_preresume(struct dm_target *ti)  {  	int r = 0;  	struct cache *cache = ti->private; -	sector_t actual_cache_size = get_dev_size(cache->cache_dev); -	(void) sector_div(actual_cache_size, cache->sectors_per_block); +	dm_cblock_t csize = get_cache_dev_size(cache);  	/*  	 * Check to see if the cache has resized.  	 
*/ -	if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) { -		cache->cache_size = to_cblock(actual_cache_size); - -		r = dm_cache_resize(cache->cmd, cache->cache_size); -		if (r) { -			DMERR("could not resize cache metadata"); +	if (!cache->sized) { +		r = resize_cache_dev(cache, csize); +		if (r)  			return r; -		}  		cache->sized = true; + +	} else if (csize != cache->cache_size) { +		if (!can_resize(cache, csize)) +			return -EINVAL; + +		r = resize_cache_dev(cache, csize); +		if (r) +			return r;  	}  	if (!cache->loaded_mappings) { @@ -2463,12 +2763,13 @@ static void cache_resume(struct dm_target *ti)  /*   * Status format:   * - * <#used metadata blocks>/<#total metadata blocks> + * <metadata block size> <#used metadata blocks>/<#total metadata blocks> + * <cache block size> <#used cache blocks>/<#total cache blocks>   * <#read hits> <#read misses> <#write hits> <#write misses> - * <#demotions> <#promotions> <#blocks in cache> <#dirty> + * <#demotions> <#promotions> <#dirty>   * <#features> <features>*   * <#core args> <core args> - * <#policy args> <policy args>* + * <policy name> <#policy args> <policy args>*   */  static void cache_status(struct dm_target *ti, status_type_t type,  			 unsigned status_flags, char *result, unsigned maxlen) @@ -2506,24 +2807,38 @@ static void cache_status(struct dm_target *ti, status_type_t type,  		residency = policy_residency(cache->policy); -		DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ", +		DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ", +		       (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),  		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),  		       (unsigned long long)nr_blocks_metadata, +		       cache->sectors_per_block, +		       (unsigned long long) from_cblock(residency), +		       (unsigned long long) from_cblock(cache->cache_size),  		       (unsigned) atomic_read(&cache->stats.read_hit),  		       (unsigned) atomic_read(&cache->stats.read_miss),  		       (unsigned) atomic_read(&cache->stats.write_hit),  		       (unsigned) atomic_read(&cache->stats.write_miss),  		       (unsigned) atomic_read(&cache->stats.demotion),  		       (unsigned) atomic_read(&cache->stats.promotion), -		       (unsigned long long) from_cblock(residency), -		       cache->nr_dirty); +		       (unsigned long) atomic_read(&cache->nr_dirty)); -		if (cache->features.write_through) +		if (writethrough_mode(&cache->features))  			DMEMIT("1 writethrough "); -		else -			DMEMIT("0 "); + +		else if (passthrough_mode(&cache->features)) +			DMEMIT("1 passthrough "); + +		else if (writeback_mode(&cache->features)) +			DMEMIT("1 writeback "); + +		else { +			DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode); +			goto err; +		}  		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold); + +		DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));  		if (sz < maxlen) {  			r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);  			if (r) @@ -2553,7 +2868,128 @@ err:  }  /* - * Supports <key> <value>. + * A cache block range can take two forms: + * + * i) A single cblock, eg. '3456' + * ii) A begin and end cblock with dots between, eg. 123-234 + */ +static int parse_cblock_range(struct cache *cache, const char *str, +			      struct cblock_range *result) +{ +	char dummy; +	uint64_t b, e; +	int r; + +	/* +	 * Try and parse form (ii) first. 
+	 */ +	r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy); +	if (r < 0) +		return r; + +	if (r == 2) { +		result->begin = to_cblock(b); +		result->end = to_cblock(e); +		return 0; +	} + +	/* +	 * That didn't work, try form (i). +	 */ +	r = sscanf(str, "%llu%c", &b, &dummy); +	if (r < 0) +		return r; + +	if (r == 1) { +		result->begin = to_cblock(b); +		result->end = to_cblock(from_cblock(result->begin) + 1u); +		return 0; +	} + +	DMERR("invalid cblock range '%s'", str); +	return -EINVAL; +} + +static int validate_cblock_range(struct cache *cache, struct cblock_range *range) +{ +	uint64_t b = from_cblock(range->begin); +	uint64_t e = from_cblock(range->end); +	uint64_t n = from_cblock(cache->cache_size); + +	if (b >= n) { +		DMERR("begin cblock out of range: %llu >= %llu", b, n); +		return -EINVAL; +	} + +	if (e > n) { +		DMERR("end cblock out of range: %llu > %llu", e, n); +		return -EINVAL; +	} + +	if (b >= e) { +		DMERR("invalid cblock range: %llu >= %llu", b, e); +		return -EINVAL; +	} + +	return 0; +} + +static int request_invalidation(struct cache *cache, struct cblock_range *range) +{ +	struct invalidation_request req; + +	INIT_LIST_HEAD(&req.list); +	req.cblocks = range; +	atomic_set(&req.complete, 0); +	req.err = 0; +	init_waitqueue_head(&req.result_wait); + +	spin_lock(&cache->invalidation_lock); +	list_add(&req.list, &cache->invalidation_requests); +	spin_unlock(&cache->invalidation_lock); +	wake_worker(cache); + +	wait_event(req.result_wait, atomic_read(&req.complete)); +	return req.err; +} + +static int process_invalidate_cblocks_message(struct cache *cache, unsigned count, +					      const char **cblock_ranges) +{ +	int r = 0; +	unsigned i; +	struct cblock_range range; + +	if (!passthrough_mode(&cache->features)) { +		DMERR("cache has to be in passthrough mode for invalidation"); +		return -EPERM; +	} + +	for (i = 0; i < count; i++) { +		r = parse_cblock_range(cache, cblock_ranges[i], &range); +		if (r) +			break; + +		r = validate_cblock_range(cache, &range); +		if (r) +			break; + +		/* +		 * Pass begin and end origin blocks to the worker and wake it. +		 */ +		r = request_invalidation(cache, &range); +		if (r) +			break; +	} + +	return r; +} + +/* + * Supports + *	"<key> <value>" + * and + *     "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*   *   * The key migration_threshold is supported by the cache target core.   
*/ @@ -2561,6 +2997,12 @@ static int cache_message(struct dm_target *ti, unsigned argc, char **argv)  {  	struct cache *cache = ti->private; +	if (!argc) +		return -EINVAL; + +	if (!strcasecmp(argv[0], "invalidate_cblocks")) +		return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1); +  	if (argc != 2)  		return -EINVAL; @@ -2605,8 +3047,8 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)  	/*  	 * FIXME: these limits may be incompatible with the cache device  	 */ -	limits->max_discard_sectors = cache->discard_block_size * 1024; -	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; +	limits->max_discard_sectors = cache->sectors_per_block; +	limits->discard_granularity = cache->sectors_per_block << SECTOR_SHIFT;  }  static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) @@ -2630,7 +3072,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)  static struct target_type cache_target = {  	.name = "cache", -	.version = {1, 1, 1}, +	.version = {1, 4, 0},  	.module = THIS_MODULE,  	.ctr = cache_ctr,  	.dtr = cache_dtr, diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 0fce0bc1a95..4cba2d808af 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1,7 +1,8 @@  /* - * Copyright (C) 2003 Christophe Saout <christophe@saout.de> + * Copyright (C) 2003 Jana Saout <jana@saout.de>   * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>   * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. + * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>   *   * This file is released under the GPL.   */ @@ -18,7 +19,6 @@  #include <linux/crypto.h>  #include <linux/workqueue.h>  #include <linux/backing-dev.h> -#include <linux/percpu.h>  #include <linux/atomic.h>  #include <linux/scatterlist.h>  #include <asm/page.h> @@ -38,12 +38,11 @@ struct convert_context {  	struct completion restart;  	struct bio *bio_in;  	struct bio *bio_out; -	unsigned int offset_in; -	unsigned int offset_out; -	unsigned int idx_in; -	unsigned int idx_out; +	struct bvec_iter iter_in; +	struct bvec_iter iter_out;  	sector_t cc_sector;  	atomic_t cc_pending; +	struct ablkcipher_request *req;  };  /* @@ -98,6 +97,13 @@ struct iv_lmk_private {  	u8 *seed;  }; +#define TCW_WHITENING_SIZE 16 +struct iv_tcw_private { +	struct crypto_shash *crc32_tfm; +	u8 *iv_seed; +	u8 *whitening; +}; +  /*   * Crypt: maps a linear range of a block device   * and encrypts / decrypts at the same time. @@ -105,15 +111,7 @@ struct iv_lmk_private {  enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };  /* - * Duplicated per-CPU state for cipher. - */ -struct crypt_cpu { -	struct ablkcipher_request *req; -}; - -/* - * The fields in here must be read only after initialization, - * changing state should be in crypt_cpu. + * The fields in here must be read only after initialization.   */  struct crypt_config {  	struct dm_dev *dev; @@ -139,16 +137,11 @@ struct crypt_config {  		struct iv_essiv_private essiv;  		struct iv_benbi_private benbi;  		struct iv_lmk_private lmk; +		struct iv_tcw_private tcw;  	} iv_gen_private;  	sector_t iv_offset;  	unsigned int iv_size; -	/* -	 * Duplicated per cpu state. Access through -	 * per_cpu_ptr() only. 
-	 */ -	struct crypt_cpu __percpu *cpu; -  	/* ESSIV: struct crypto_cipher *essiv_tfm */  	void *iv_private;  	struct crypto_ablkcipher **tfms; @@ -171,7 +164,8 @@ struct crypt_config {  	unsigned long flags;  	unsigned int key_size; -	unsigned int key_parts; +	unsigned int key_parts;      /* independent parts in key buffer */ +	unsigned int key_extra_size; /* additional keys length */  	u8 key[0];  }; @@ -184,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);  static void kcryptd_queue_crypt(struct dm_crypt_io *io);  static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); -static struct crypt_cpu *this_crypt_config(struct crypt_config *cc) -{ -	return this_cpu_ptr(cc->cpu); -} -  /*   * Use this to access cipher attributes that are the same for each CPU.   */ @@ -230,6 +219,16 @@ static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)   *         version 3: the same as version 2 with additional IV seed   *                   (it uses 65 keys, last key is used as IV seed)   * + * tcw:  Compatible implementation of the block chaining mode used + *       by the TrueCrypt device encryption system (prior to version 4.1). + *       For more info see: http://www.truecrypt.org + *       It operates on full 512 byte sectors and uses CBC + *       with an IV derived from initial key and the sector number. + *       In addition, whitening value is applied on every sector, whitening + *       is calculated from initial key, sector number and mixed using CRC32. + *       Note that this encryption scheme is vulnerable to watermarking attacks + *       and should be used for old compatible containers access only. + *   * plumb: unimplemented, see:   * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454   */ @@ -530,7 +529,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,  		char ctx[crypto_shash_descsize(lmk->hash_tfm)];  	} sdesc;  	struct md5_state md5state; -	u32 buf[4]; +	__le32 buf[4];  	int i, r;  	sdesc.desc.tfm = lmk->hash_tfm; @@ -608,6 +607,153 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,  	return r;  } +static void crypt_iv_tcw_dtr(struct crypt_config *cc) +{ +	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + +	kzfree(tcw->iv_seed); +	tcw->iv_seed = NULL; +	kzfree(tcw->whitening); +	tcw->whitening = NULL; + +	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm)) +		crypto_free_shash(tcw->crc32_tfm); +	tcw->crc32_tfm = NULL; +} + +static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti, +			    const char *opts) +{ +	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + +	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) { +		ti->error = "Wrong key size for TCW"; +		return -EINVAL; +	} + +	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0); +	if (IS_ERR(tcw->crc32_tfm)) { +		ti->error = "Error initializing CRC32 in TCW"; +		return PTR_ERR(tcw->crc32_tfm); +	} + +	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL); +	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL); +	if (!tcw->iv_seed || !tcw->whitening) { +		crypt_iv_tcw_dtr(cc); +		ti->error = "Error allocating seed storage in TCW"; +		return -ENOMEM; +	} + +	return 0; +} + +static int crypt_iv_tcw_init(struct crypt_config *cc) +{ +	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; +	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE; + +	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size); +	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size], +	       TCW_WHITENING_SIZE); 
+ +	return 0; +} + +static int crypt_iv_tcw_wipe(struct crypt_config *cc) +{ +	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + +	memset(tcw->iv_seed, 0, cc->iv_size); +	memset(tcw->whitening, 0, TCW_WHITENING_SIZE); + +	return 0; +} + +static int crypt_iv_tcw_whitening(struct crypt_config *cc, +				  struct dm_crypt_request *dmreq, +				  u8 *data) +{ +	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; +	u64 sector = cpu_to_le64((u64)dmreq->iv_sector); +	u8 buf[TCW_WHITENING_SIZE]; +	struct { +		struct shash_desc desc; +		char ctx[crypto_shash_descsize(tcw->crc32_tfm)]; +	} sdesc; +	int i, r; + +	/* xor whitening with sector number */ +	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE); +	crypto_xor(buf, (u8 *)&sector, 8); +	crypto_xor(&buf[8], (u8 *)&sector, 8); + +	/* calculate crc32 for every 32bit part and xor it */ +	sdesc.desc.tfm = tcw->crc32_tfm; +	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; +	for (i = 0; i < 4; i++) { +		r = crypto_shash_init(&sdesc.desc); +		if (r) +			goto out; +		r = crypto_shash_update(&sdesc.desc, &buf[i * 4], 4); +		if (r) +			goto out; +		r = crypto_shash_final(&sdesc.desc, &buf[i * 4]); +		if (r) +			goto out; +	} +	crypto_xor(&buf[0], &buf[12], 4); +	crypto_xor(&buf[4], &buf[8], 4); + +	/* apply whitening (8 bytes) to whole sector */ +	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++) +		crypto_xor(data + i * 8, buf, 8); +out: +	memset(buf, 0, sizeof(buf)); +	return r; +} + +static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, +			    struct dm_crypt_request *dmreq) +{ +	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; +	u64 sector = cpu_to_le64((u64)dmreq->iv_sector); +	u8 *src; +	int r = 0; + +	/* Remove whitening from ciphertext */ +	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) { +		src = kmap_atomic(sg_page(&dmreq->sg_in)); +		r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset); +		kunmap_atomic(src); +	} + +	/* Calculate IV */ +	memcpy(iv, tcw->iv_seed, cc->iv_size); +	crypto_xor(iv, (u8 *)&sector, 8); +	if (cc->iv_size > 8) +		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8); + +	return r; +} + +static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv, +			     struct dm_crypt_request *dmreq) +{ +	u8 *dst; +	int r; + +	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) +		return 0; + +	/* Apply whitening on ciphertext */ +	dst = kmap_atomic(sg_page(&dmreq->sg_out)); +	r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset); +	kunmap_atomic(dst); + +	return r; +} +  static struct crypt_iv_operations crypt_iv_plain_ops = {  	.generator = crypt_iv_plain_gen  }; @@ -643,6 +789,15 @@ static struct crypt_iv_operations crypt_iv_lmk_ops = {  	.post	   = crypt_iv_lmk_post  }; +static struct crypt_iv_operations crypt_iv_tcw_ops = { +	.ctr	   = crypt_iv_tcw_ctr, +	.dtr	   = crypt_iv_tcw_dtr, +	.init	   = crypt_iv_tcw_init, +	.wipe	   = crypt_iv_tcw_wipe, +	.generator = crypt_iv_tcw_gen, +	.post	   = crypt_iv_tcw_post +}; +  static void crypt_convert_init(struct crypt_config *cc,  			       struct convert_context *ctx,  			       struct bio *bio_out, struct bio *bio_in, @@ -650,10 +805,10 @@ static void crypt_convert_init(struct crypt_config *cc,  {  	ctx->bio_in = bio_in;  	ctx->bio_out = bio_out; -	ctx->offset_in = 0; -	ctx->offset_out = 0; -	ctx->idx_in = bio_in ? bio_in->bi_idx : 0; -	ctx->idx_out = bio_out ? 
bio_out->bi_idx : 0; +	if (bio_in) +		ctx->iter_in = bio_in->bi_iter; +	if (bio_out) +		ctx->iter_out = bio_out->bi_iter;  	ctx->cc_sector = sector + cc->iv_offset;  	init_completion(&ctx->restart);  } @@ -681,8 +836,8 @@ static int crypt_convert_block(struct crypt_config *cc,  			       struct convert_context *ctx,  			       struct ablkcipher_request *req)  { -	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); -	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); +	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in); +	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);  	struct dm_crypt_request *dmreq;  	u8 *iv;  	int r; @@ -693,24 +848,15 @@ static int crypt_convert_block(struct crypt_config *cc,  	dmreq->iv_sector = ctx->cc_sector;  	dmreq->ctx = ctx;  	sg_init_table(&dmreq->sg_in, 1); -	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, -		    bv_in->bv_offset + ctx->offset_in); +	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT, +		    bv_in.bv_offset);  	sg_init_table(&dmreq->sg_out, 1); -	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, -		    bv_out->bv_offset + ctx->offset_out); - -	ctx->offset_in += 1 << SECTOR_SHIFT; -	if (ctx->offset_in >= bv_in->bv_len) { -		ctx->offset_in = 0; -		ctx->idx_in++; -	} +	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT, +		    bv_out.bv_offset); -	ctx->offset_out += 1 << SECTOR_SHIFT; -	if (ctx->offset_out >= bv_out->bv_len) { -		ctx->offset_out = 0; -		ctx->idx_out++; -	} +	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT); +	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);  	if (cc->iv_gen_ops) {  		r = cc->iv_gen_ops->generator(cc, iv, dmreq); @@ -738,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,  static void crypt_alloc_req(struct crypt_config *cc,  			    struct convert_context *ctx)  { -	struct crypt_cpu *this_cc = this_crypt_config(cc);  	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); -	if (!this_cc->req) -		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); +	if (!ctx->req) +		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO); -	ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]); -	ablkcipher_request_set_callback(this_cc->req, +	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]); +	ablkcipher_request_set_callback(ctx->req,  	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, -	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req)); +	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));  }  /* @@ -756,28 +901,26 @@ static void crypt_alloc_req(struct crypt_config *cc,  static int crypt_convert(struct crypt_config *cc,  			 struct convert_context *ctx)  { -	struct crypt_cpu *this_cc = this_crypt_config(cc);  	int r;  	atomic_set(&ctx->cc_pending, 1); -	while(ctx->idx_in < ctx->bio_in->bi_vcnt && -	      ctx->idx_out < ctx->bio_out->bi_vcnt) { +	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {  		crypt_alloc_req(cc, ctx);  		atomic_inc(&ctx->cc_pending); -		r = crypt_convert_block(cc, ctx, this_cc->req); +		r = crypt_convert_block(cc, ctx, ctx->req);  		switch (r) {  		/* async */  		case -EBUSY:  			wait_for_completion(&ctx->restart); -			INIT_COMPLETION(ctx->restart); +			reinit_completion(&ctx->restart);  			/* fall through*/  		case -EINPROGRESS: -			this_cc->req = NULL; +			ctx->req = NULL;  			ctx->cc_sector++;  			continue; @@ -845,7 +988,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, 
unsigned size,  		size -= len;  	} -	if (!clone->bi_size) { +	if (!clone->bi_iter.bi_size) {  		bio_put(clone);  		return NULL;  	} @@ -876,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,  	io->sector = sector;  	io->error = 0;  	io->base_io = NULL; +	io->ctx.req = NULL;  	atomic_set(&io->io_pending, 0);  	return io; @@ -901,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)  	if (!atomic_dec_and_test(&io->io_pending))  		return; +	if (io->ctx.req) +		mempool_free(io->ctx.req, cc->req_pool);  	mempool_free(io, cc->io_pool);  	if (likely(!base_io)) @@ -985,7 +1131,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)  	crypt_inc_pending(io);  	clone_init(io, clone); -	clone->bi_sector = cc->start + io->sector; +	clone->bi_iter.bi_sector = cc->start + io->sector;  	generic_make_request(clone);  	return 0; @@ -1031,9 +1177,9 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)  	}  	/* crypt_convert should have filled the clone bio */ -	BUG_ON(io->ctx.idx_out < clone->bi_vcnt); +	BUG_ON(io->ctx.iter_out.bi_size); -	clone->bi_sector = cc->start + io->sector; +	clone->bi_iter.bi_sector = cc->start + io->sector;  	if (async)  		kcryptd_queue_io(io); @@ -1048,7 +1194,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)  	struct dm_crypt_io *new_io;  	int crypt_finished;  	unsigned out_of_pages = 0; -	unsigned remaining = io->base_bio->bi_size; +	unsigned remaining = io->base_bio->bi_iter.bi_size;  	sector_t sector = io->sector;  	int r; @@ -1070,9 +1216,9 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)  		}  		io->ctx.bio_out = clone; -		io->ctx.idx_out = 0; +		io->ctx.iter_out = clone->bi_iter; -		remaining -= clone->bi_size; +		remaining -= clone->bi_iter.bi_size;  		sector += bio_sectors(clone);  		crypt_inc_pending(io); @@ -1114,8 +1260,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)  			crypt_inc_pending(new_io);  			crypt_convert_init(cc, &new_io->ctx, NULL,  					   io->base_bio, sector); -			new_io->ctx.idx_in = io->ctx.idx_in; -			new_io->ctx.offset_in = io->ctx.offset_in; +			new_io->ctx.iter_in = io->ctx.iter_in;  			/*  			 * Fragments after the first use the base_io @@ -1274,9 +1419,12 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)  static int crypt_setkey_allcpus(struct crypt_config *cc)  { -	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count); +	unsigned subkey_size;  	int err = 0, i, r; +	/* Ignore extra keys (which are used for IV etc) */ +	subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count); +  	for (i = 0; i < cc->tfms_count; i++) {  		r = crypto_ablkcipher_setkey(cc->tfms[i],  					     cc->key + (i * subkey_size), @@ -1326,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc)  static void crypt_dtr(struct dm_target *ti)  {  	struct crypt_config *cc = ti->private; -	struct crypt_cpu *cpu_cc; -	int cpu;  	ti->private = NULL; @@ -1339,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti)  	if (cc->crypt_queue)  		destroy_workqueue(cc->crypt_queue); -	if (cc->cpu) -		for_each_possible_cpu(cpu) { -			cpu_cc = per_cpu_ptr(cc->cpu, cpu); -			if (cpu_cc->req) -				mempool_free(cpu_cc->req, cc->req_pool); -		} -  	crypt_free_tfms(cc);  	if (cc->bs) @@ -1364,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti)  	if (cc->dev)  		dm_put_device(ti, cc->dev); -	if (cc->cpu) -		free_percpu(cc->cpu); -  	kzfree(cc->cipher);  	kzfree(cc->cipher_string); @@ -1409,6 +1545,7 @@ 
static int crypt_ctr_cipher(struct dm_target *ti,  		return -EINVAL;  	}  	cc->key_parts = cc->tfms_count; +	cc->key_extra_size = 0;  	cc->cipher = kstrdup(cipher, GFP_KERNEL);  	if (!cc->cipher) @@ -1421,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,  	if (tmp)  		DMWARN("Ignoring unexpected additional cipher options"); -	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)), -				 __alignof__(struct crypt_cpu)); -	if (!cc->cpu) { -		ti->error = "Cannot allocate per cpu state"; -		goto bad_mem; -	} -  	/*  	 * For compatibility with the original dm-crypt mapping format, if  	 * only the cipher name is supplied, use cbc-plain. @@ -1460,13 +1590,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,  		goto bad;  	} -	/* Initialize and set key */ -	ret = crypt_set_key(cc, key); -	if (ret < 0) { -		ti->error = "Error decoding and setting key"; -		goto bad; -	} -  	/* Initialize IV */  	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));  	if (cc->iv_size) @@ -1493,18 +1616,33 @@ static int crypt_ctr_cipher(struct dm_target *ti,  		cc->iv_gen_ops = &crypt_iv_null_ops;  	else if (strcmp(ivmode, "lmk") == 0) {  		cc->iv_gen_ops = &crypt_iv_lmk_ops; -		/* Version 2 and 3 is recognised according +		/* +		 * Version 2 and 3 is recognised according  		 * to length of provided multi-key string.  		 * If present (version 3), last key is used as IV seed. +		 * All keys (including IV seed) are always the same size.  		 */ -		if (cc->key_size % cc->key_parts) +		if (cc->key_size % cc->key_parts) {  			cc->key_parts++; +			cc->key_extra_size = cc->key_size / cc->key_parts; +		} +	} else if (strcmp(ivmode, "tcw") == 0) { +		cc->iv_gen_ops = &crypt_iv_tcw_ops; +		cc->key_parts += 2; /* IV + whitening */ +		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;  	} else {  		ret = -EINVAL;  		ti->error = "Invalid IV mode";  		goto bad;  	} +	/* Initialize and set key */ +	ret = crypt_set_key(cc, key); +	if (ret < 0) { +		ti->error = "Error decoding and setting key"; +		goto bad; +	} +  	/* Allocate IV */  	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {  		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts); @@ -1681,11 +1819,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)  	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {  		bio->bi_bdev = cc->dev->bdev;  		if (bio_sectors(bio)) -			bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector); +			bio->bi_iter.bi_sector = cc->start + +				dm_target_offset(ti, bio->bi_iter.bi_sector);  		return DM_MAPIO_REMAPPED;  	} -	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector)); +	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));  	if (bio_data_dir(io->base_bio) == READ) {  		if (kcryptd_io_read(io, GFP_NOWAIT)) @@ -1817,7 +1956,7 @@ static int crypt_iterate_devices(struct dm_target *ti,  static struct target_type crypt_target = {  	.name   = "crypt", -	.version = {1, 12, 1}, +	.version = {1, 13, 0},  	.module = THIS_MODULE,  	.ctr    = crypt_ctr,  	.dtr    = crypt_dtr, @@ -1857,6 +1996,6 @@ static void __exit dm_crypt_exit(void)  module_init(dm_crypt_init);  module_exit(dm_crypt_exit); -MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); +MODULE_AUTHOR("Jana Saout <jana@saout.de>");  MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");  MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 496d5f3646a..42c3a27a14c 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -20,10 +20,10 @@  struct delay_c {  	struct timer_list 
delay_timer;  	struct mutex timer_lock; +	struct workqueue_struct *kdelayd_wq;  	struct work_struct flush_expired_bios;  	struct list_head delayed_bios;  	atomic_t may_delay; -	mempool_t *delayed_pool;  	struct dm_dev *dev_read;  	sector_t start_read; @@ -39,20 +39,16 @@ struct delay_c {  struct dm_delay_info {  	struct delay_c *context;  	struct list_head list; -	struct bio *bio;  	unsigned long expires;  };  static DEFINE_MUTEX(delayed_bios_lock); -static struct workqueue_struct *kdelayd_wq; -static struct kmem_cache *delayed_cache; -  static void handle_delayed_timer(unsigned long data)  {  	struct delay_c *dc = (struct delay_c *)data; -	queue_work(kdelayd_wq, &dc->flush_expired_bios); +	queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);  }  static void queue_timeout(struct delay_c *dc, unsigned long expires) @@ -87,13 +83,14 @@ static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)  	mutex_lock(&delayed_bios_lock);  	list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {  		if (flush_all || time_after_eq(jiffies, delayed->expires)) { +			struct bio *bio = dm_bio_from_per_bio_data(delayed, +						sizeof(struct dm_delay_info));  			list_del(&delayed->list); -			bio_list_add(&flush_bios, delayed->bio); -			if ((bio_data_dir(delayed->bio) == WRITE)) +			bio_list_add(&flush_bios, bio); +			if ((bio_data_dir(bio) == WRITE))  				delayed->context->writes--;  			else  				delayed->context->reads--; -			mempool_free(delayed, dc->delayed_pool);  			continue;  		} @@ -185,10 +182,10 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)  	}  out: -	dc->delayed_pool = mempool_create_slab_pool(128, delayed_cache); -	if (!dc->delayed_pool) { -		DMERR("Couldn't create delayed bio pool."); -		goto bad_dev_write; +	dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0); +	if (!dc->kdelayd_wq) { +		DMERR("Couldn't start kdelayd"); +		goto bad_queue;  	}  	setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc); @@ -200,10 +197,11 @@ out:  	ti->num_flush_bios = 1;  	ti->num_discard_bios = 1; +	ti->per_bio_data_size = sizeof(struct dm_delay_info);  	ti->private = dc;  	return 0; -bad_dev_write: +bad_queue:  	if (dc->dev_write)  		dm_put_device(ti, dc->dev_write);  bad_dev_read: @@ -217,14 +215,13 @@ static void delay_dtr(struct dm_target *ti)  {  	struct delay_c *dc = ti->private; -	flush_workqueue(kdelayd_wq); +	destroy_workqueue(dc->kdelayd_wq);  	dm_put_device(ti, dc->dev_read);  	if (dc->dev_write)  		dm_put_device(ti, dc->dev_write); -	mempool_destroy(dc->delayed_pool);  	kfree(dc);  } @@ -236,10 +233,9 @@ static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)  	if (!delay || !atomic_read(&dc->may_delay))  		return 1; -	delayed = mempool_alloc(dc->delayed_pool, GFP_NOIO); +	delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));  	delayed->context = dc; -	delayed->bio = bio;  	delayed->expires = expires = jiffies + (delay * HZ / 1000);  	mutex_lock(&delayed_bios_lock); @@ -281,14 +277,15 @@ static int delay_map(struct dm_target *ti, struct bio *bio)  	if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {  		bio->bi_bdev = dc->dev_write->bdev;  		if (bio_sectors(bio)) -			bio->bi_sector = dc->start_write + -					 dm_target_offset(ti, bio->bi_sector); +			bio->bi_iter.bi_sector = dc->start_write + +				dm_target_offset(ti, bio->bi_iter.bi_sector);  		return delay_bio(dc, dc->write_delay, bio);  	}  	bio->bi_bdev = dc->dev_read->bdev; -	bio->bi_sector = dc->start_read + dm_target_offset(ti, 
bio->bi_sector); +	bio->bi_iter.bi_sector = dc->start_read + +		dm_target_offset(ti, bio->bi_iter.bi_sector);  	return delay_bio(dc, dc->read_delay, bio);  } @@ -348,19 +345,7 @@ static struct target_type delay_target = {  static int __init dm_delay_init(void)  { -	int r = -ENOMEM; - -	kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0); -	if (!kdelayd_wq) { -		DMERR("Couldn't start kdelayd"); -		goto bad_queue; -	} - -	delayed_cache = KMEM_CACHE(dm_delay_info, 0); -	if (!delayed_cache) { -		DMERR("Couldn't create delayed bio cache."); -		goto bad_memcache; -	} +	int r;  	r = dm_register_target(&delay_target);  	if (r < 0) { @@ -371,18 +356,12 @@ static int __init dm_delay_init(void)  	return 0;  bad_register: -	kmem_cache_destroy(delayed_cache); -bad_memcache: -	destroy_workqueue(kdelayd_wq); -bad_queue:  	return r;  }  static void __exit dm_delay_exit(void)  {  	dm_unregister_target(&delay_target); -	kmem_cache_destroy(delayed_cache); -	destroy_workqueue(kdelayd_wq);  }  /* Module hooks */ diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c new file mode 100644 index 00000000000..ad913cd4ade --- /dev/null +++ b/drivers/md/dm-era-target.c @@ -0,0 +1,1747 @@ +#include "dm.h" +#include "persistent-data/dm-transaction-manager.h" +#include "persistent-data/dm-bitset.h" +#include "persistent-data/dm-space-map.h" + +#include <linux/dm-io.h> +#include <linux/dm-kcopyd.h> +#include <linux/init.h> +#include <linux/mempool.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> + +#define DM_MSG_PREFIX "era" + +#define SUPERBLOCK_LOCATION 0 +#define SUPERBLOCK_MAGIC 2126579579 +#define SUPERBLOCK_CSUM_XOR 146538381 +#define MIN_ERA_VERSION 1 +#define MAX_ERA_VERSION 1 +#define INVALID_WRITESET_ROOT SUPERBLOCK_LOCATION +#define MIN_BLOCK_SIZE 8 + +/*---------------------------------------------------------------- + * Writeset + *--------------------------------------------------------------*/ +struct writeset_metadata { +	uint32_t nr_bits; +	dm_block_t root; +}; + +struct writeset { +	struct writeset_metadata md; + +	/* +	 * An in core copy of the bits to save constantly doing look ups on +	 * disk. +	 */ +	unsigned long *bits; +}; + +/* + * This does not free off the on disk bitset as this will normally be done + * after digesting into the era array. + */ +static void writeset_free(struct writeset *ws) +{ +	vfree(ws->bits); +} + +static int setup_on_disk_bitset(struct dm_disk_bitset *info, +				unsigned nr_bits, dm_block_t *root) +{ +	int r; + +	r = dm_bitset_empty(info, root); +	if (r) +		return r; + +	return dm_bitset_resize(info, *root, 0, nr_bits, false, root); +} + +static size_t bitset_size(unsigned nr_bits) +{ +	return sizeof(unsigned long) * dm_div_up(nr_bits, BITS_PER_LONG); +} + +/* + * Allocates memory for the in core bitset. + */ +static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks) +{ +	ws->md.nr_bits = nr_blocks; +	ws->md.root = INVALID_WRITESET_ROOT; +	ws->bits = vzalloc(bitset_size(nr_blocks)); +	if (!ws->bits) { +		DMERR("%s: couldn't allocate in memory bitset", __func__); +		return -ENOMEM; +	} + +	return 0; +} + +/* + * Wipes the in-core bitset, and creates a new on disk bitset. 
+ */ +static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws) +{ +	int r; + +	memset(ws->bits, 0, bitset_size(ws->md.nr_bits)); + +	r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root); +	if (r) { +		DMERR("%s: setup_on_disk_bitset failed", __func__); +		return r; +	} + +	return 0; +} + +static bool writeset_marked(struct writeset *ws, dm_block_t block) +{ +	return test_bit(block, ws->bits); +} + +static int writeset_marked_on_disk(struct dm_disk_bitset *info, +				   struct writeset_metadata *m, dm_block_t block, +				   bool *result) +{ +	dm_block_t old = m->root; + +	/* +	 * The bitset was flushed when it was archived, so we know there'll +	 * be no change to the root. +	 */ +	int r = dm_bitset_test_bit(info, m->root, block, &m->root, result); +	if (r) { +		DMERR("%s: dm_bitset_test_bit failed", __func__); +		return r; +	} + +	BUG_ON(m->root != old); + +	return r; +} + +/* + * Returns < 0 on error, 0 if the bit wasn't previously set, 1 if it was. + */ +static int writeset_test_and_set(struct dm_disk_bitset *info, +				 struct writeset *ws, uint32_t block) +{ +	int r; + +	if (!test_and_set_bit(block, ws->bits)) { +		r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root); +		if (r) { +			/* FIXME: fail mode */ +			return r; +		} + +		return 0; +	} + +	return 1; +} + +/*---------------------------------------------------------------- + * On disk metadata layout + *--------------------------------------------------------------*/ +#define SPACE_MAP_ROOT_SIZE 128 +#define UUID_LEN 16 + +struct writeset_disk { +	__le32 nr_bits; +	__le64 root; +} __packed; + +struct superblock_disk { +	__le32 csum; +	__le32 flags; +	__le64 blocknr; + +	__u8 uuid[UUID_LEN]; +	__le64 magic; +	__le32 version; + +	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE]; + +	__le32 data_block_size; +	__le32 metadata_block_size; +	__le32 nr_blocks; + +	__le32 current_era; +	struct writeset_disk current_writeset; + +	/* +	 * Only these two fields are valid within the metadata snapshot. 
+	 */ +	__le64 writeset_tree_root; +	__le64 era_array_root; + +	__le64 metadata_snap; +} __packed; + +/*---------------------------------------------------------------- + * Superblock validation + *--------------------------------------------------------------*/ +static void sb_prepare_for_write(struct dm_block_validator *v, +				 struct dm_block *b, +				 size_t sb_block_size) +{ +	struct superblock_disk *disk = dm_block_data(b); + +	disk->blocknr = cpu_to_le64(dm_block_location(b)); +	disk->csum = cpu_to_le32(dm_bm_checksum(&disk->flags, +						sb_block_size - sizeof(__le32), +						SUPERBLOCK_CSUM_XOR)); +} + +static int check_metadata_version(struct superblock_disk *disk) +{ +	uint32_t metadata_version = le32_to_cpu(disk->version); +	if (metadata_version < MIN_ERA_VERSION || metadata_version > MAX_ERA_VERSION) { +		DMERR("Era metadata version %u found, but only versions between %u and %u supported.", +		      metadata_version, MIN_ERA_VERSION, MAX_ERA_VERSION); +		return -EINVAL; +	} + +	return 0; +} + +static int sb_check(struct dm_block_validator *v, +		    struct dm_block *b, +		    size_t sb_block_size) +{ +	struct superblock_disk *disk = dm_block_data(b); +	__le32 csum_le; + +	if (dm_block_location(b) != le64_to_cpu(disk->blocknr)) { +		DMERR("sb_check failed: blocknr %llu: wanted %llu", +		      le64_to_cpu(disk->blocknr), +		      (unsigned long long)dm_block_location(b)); +		return -ENOTBLK; +	} + +	if (le64_to_cpu(disk->magic) != SUPERBLOCK_MAGIC) { +		DMERR("sb_check failed: magic %llu: wanted %llu", +		      le64_to_cpu(disk->magic), +		      (unsigned long long) SUPERBLOCK_MAGIC); +		return -EILSEQ; +	} + +	csum_le = cpu_to_le32(dm_bm_checksum(&disk->flags, +					     sb_block_size - sizeof(__le32), +					     SUPERBLOCK_CSUM_XOR)); +	if (csum_le != disk->csum) { +		DMERR("sb_check failed: csum %u: wanted %u", +		      le32_to_cpu(csum_le), le32_to_cpu(disk->csum)); +		return -EILSEQ; +	} + +	return check_metadata_version(disk); +} + +static struct dm_block_validator sb_validator = { +	.name = "superblock", +	.prepare_for_write = sb_prepare_for_write, +	.check = sb_check +}; + +/*---------------------------------------------------------------- + * Low level metadata handling + *--------------------------------------------------------------*/ +#define DM_ERA_METADATA_BLOCK_SIZE 4096 +#define DM_ERA_METADATA_CACHE_SIZE 64 +#define ERA_MAX_CONCURRENT_LOCKS 5 + +struct era_metadata { +	struct block_device *bdev; +	struct dm_block_manager *bm; +	struct dm_space_map *sm; +	struct dm_transaction_manager *tm; + +	dm_block_t block_size; +	uint32_t nr_blocks; + +	uint32_t current_era; + +	/* +	 * We preallocate 2 writesets.  When an era rolls over we +	 * switch between them. This means the allocation is done at +	 * preresume time, rather than on the io path. +	 */ +	struct writeset writesets[2]; +	struct writeset *current_writeset; + +	dm_block_t writeset_tree_root; +	dm_block_t era_array_root; + +	struct dm_disk_bitset bitset_info; +	struct dm_btree_info writeset_tree_info; +	struct dm_array_info era_array_info; + +	dm_block_t metadata_snap; + +	/* +	 * A flag that is set whenever a writeset has been archived. +	 */ +	bool archived_writesets; + +	/* +	 * Reading the space map root can fail, so we read it into this +	 * buffer before the superblock is locked and updated. 
+	 */ +	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE]; +}; + +static int superblock_read_lock(struct era_metadata *md, +				struct dm_block **sblock) +{ +	return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION, +			       &sb_validator, sblock); +} + +static int superblock_lock_zero(struct era_metadata *md, +				struct dm_block **sblock) +{ +	return dm_bm_write_lock_zero(md->bm, SUPERBLOCK_LOCATION, +				     &sb_validator, sblock); +} + +static int superblock_lock(struct era_metadata *md, +			   struct dm_block **sblock) +{ +	return dm_bm_write_lock(md->bm, SUPERBLOCK_LOCATION, +				&sb_validator, sblock); +} + +/* FIXME: duplication with cache and thin */ +static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result) +{ +	int r; +	unsigned i; +	struct dm_block *b; +	__le64 *data_le, zero = cpu_to_le64(0); +	unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64); + +	/* +	 * We can't use a validator here - it may be all zeroes. +	 */ +	r = dm_bm_read_lock(bm, SUPERBLOCK_LOCATION, NULL, &b); +	if (r) +		return r; + +	data_le = dm_block_data(b); +	*result = true; +	for (i = 0; i < sb_block_size; i++) { +		if (data_le[i] != zero) { +			*result = false; +			break; +		} +	} + +	return dm_bm_unlock(b); +} + +/*----------------------------------------------------------------*/ + +static void ws_pack(const struct writeset_metadata *core, struct writeset_disk *disk) +{ +	disk->nr_bits = cpu_to_le32(core->nr_bits); +	disk->root = cpu_to_le64(core->root); +} + +static void ws_unpack(const struct writeset_disk *disk, struct writeset_metadata *core) +{ +	core->nr_bits = le32_to_cpu(disk->nr_bits); +	core->root = le64_to_cpu(disk->root); +} + +static void ws_inc(void *context, const void *value) +{ +	struct era_metadata *md = context; +	struct writeset_disk ws_d; +	dm_block_t b; + +	memcpy(&ws_d, value, sizeof(ws_d)); +	b = le64_to_cpu(ws_d.root); + +	dm_tm_inc(md->tm, b); +} + +static void ws_dec(void *context, const void *value) +{ +	struct era_metadata *md = context; +	struct writeset_disk ws_d; +	dm_block_t b; + +	memcpy(&ws_d, value, sizeof(ws_d)); +	b = le64_to_cpu(ws_d.root); + +	dm_bitset_del(&md->bitset_info, b); +} + +static int ws_eq(void *context, const void *value1, const void *value2) +{ +	return !memcmp(value1, value2, sizeof(struct writeset_metadata)); +} + +/*----------------------------------------------------------------*/ + +static void setup_writeset_tree_info(struct era_metadata *md) +{ +	struct dm_btree_value_type *vt = &md->writeset_tree_info.value_type; +	md->writeset_tree_info.tm = md->tm; +	md->writeset_tree_info.levels = 1; +	vt->context = md; +	vt->size = sizeof(struct writeset_disk); +	vt->inc = ws_inc; +	vt->dec = ws_dec; +	vt->equal = ws_eq; +} + +static void setup_era_array_info(struct era_metadata *md) + +{ +	struct dm_btree_value_type vt; +	vt.context = NULL; +	vt.size = sizeof(__le32); +	vt.inc = NULL; +	vt.dec = NULL; +	vt.equal = NULL; + +	dm_array_info_init(&md->era_array_info, md->tm, &vt); +} + +static void setup_infos(struct era_metadata *md) +{ +	dm_disk_bitset_init(md->tm, &md->bitset_info); +	setup_writeset_tree_info(md); +	setup_era_array_info(md); +} + +/*----------------------------------------------------------------*/ + +static int create_fresh_metadata(struct era_metadata *md) +{ +	int r; + +	r = dm_tm_create_with_sm(md->bm, SUPERBLOCK_LOCATION, +				 &md->tm, &md->sm); +	if (r < 0) { +		DMERR("dm_tm_create_with_sm failed"); +		return r; +	} + +	setup_infos(md); + +	r = dm_btree_empty(&md->writeset_tree_info, 
&md->writeset_tree_root); +	if (r) { +		DMERR("couldn't create new writeset tree"); +		goto bad; +	} + +	r = dm_array_empty(&md->era_array_info, &md->era_array_root); +	if (r) { +		DMERR("couldn't create era array"); +		goto bad; +	} + +	return 0; + +bad: +	dm_sm_destroy(md->sm); +	dm_tm_destroy(md->tm); + +	return r; +} + +static int save_sm_root(struct era_metadata *md) +{ +	int r; +	size_t metadata_len; + +	r = dm_sm_root_size(md->sm, &metadata_len); +	if (r < 0) +		return r; + +	return dm_sm_copy_root(md->sm, &md->metadata_space_map_root, +			       metadata_len); +} + +static void copy_sm_root(struct era_metadata *md, struct superblock_disk *disk) +{ +	memcpy(&disk->metadata_space_map_root, +	       &md->metadata_space_map_root, +	       sizeof(md->metadata_space_map_root)); +} + +/* + * Writes a superblock, including the static fields that don't get updated + * with every commit (possible optimisation here).  'md' should be fully + * constructed when this is called. + */ +static void prepare_superblock(struct era_metadata *md, struct superblock_disk *disk) +{ +	disk->magic = cpu_to_le64(SUPERBLOCK_MAGIC); +	disk->flags = cpu_to_le32(0ul); + +	/* FIXME: can't keep blanking the uuid (uuid is currently unused though) */ +	memset(disk->uuid, 0, sizeof(disk->uuid)); +	disk->version = cpu_to_le32(MAX_ERA_VERSION); + +	copy_sm_root(md, disk); + +	disk->data_block_size = cpu_to_le32(md->block_size); +	disk->metadata_block_size = cpu_to_le32(DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); +	disk->nr_blocks = cpu_to_le32(md->nr_blocks); +	disk->current_era = cpu_to_le32(md->current_era); + +	ws_pack(&md->current_writeset->md, &disk->current_writeset); +	disk->writeset_tree_root = cpu_to_le64(md->writeset_tree_root); +	disk->era_array_root = cpu_to_le64(md->era_array_root); +	disk->metadata_snap = cpu_to_le64(md->metadata_snap); +} + +static int write_superblock(struct era_metadata *md) +{ +	int r; +	struct dm_block *sblock; +	struct superblock_disk *disk; + +	r = save_sm_root(md); +	if (r) { +		DMERR("%s: save_sm_root failed", __func__); +		return r; +	} + +	r = superblock_lock_zero(md, &sblock); +	if (r) +		return r; + +	disk = dm_block_data(sblock); +	prepare_superblock(md, disk); + +	return dm_tm_commit(md->tm, sblock); +} + +/* + * Assumes block_size and the infos are set. 
+ */ +static int format_metadata(struct era_metadata *md) +{ +	int r; + +	r = create_fresh_metadata(md); +	if (r) +		return r; + +	r = write_superblock(md); +	if (r) { +		dm_sm_destroy(md->sm); +		dm_tm_destroy(md->tm); +		return r; +	} + +	return 0; +} + +static int open_metadata(struct era_metadata *md) +{ +	int r; +	struct dm_block *sblock; +	struct superblock_disk *disk; + +	r = superblock_read_lock(md, &sblock); +	if (r) { +		DMERR("couldn't read_lock superblock"); +		return r; +	} + +	disk = dm_block_data(sblock); +	r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION, +			       disk->metadata_space_map_root, +			       sizeof(disk->metadata_space_map_root), +			       &md->tm, &md->sm); +	if (r) { +		DMERR("dm_tm_open_with_sm failed"); +		goto bad; +	} + +	setup_infos(md); + +	md->block_size = le32_to_cpu(disk->data_block_size); +	md->nr_blocks = le32_to_cpu(disk->nr_blocks); +	md->current_era = le32_to_cpu(disk->current_era); + +	md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root); +	md->era_array_root = le64_to_cpu(disk->era_array_root); +	md->metadata_snap = le64_to_cpu(disk->metadata_snap); +	md->archived_writesets = true; + +	return dm_bm_unlock(sblock); + +bad: +	dm_bm_unlock(sblock); +	return r; +} + +static int open_or_format_metadata(struct era_metadata *md, +				   bool may_format) +{ +	int r; +	bool unformatted = false; + +	r = superblock_all_zeroes(md->bm, &unformatted); +	if (r) +		return r; + +	if (unformatted) +		return may_format ? format_metadata(md) : -EPERM; + +	return open_metadata(md); +} + +static int create_persistent_data_objects(struct era_metadata *md, +					  bool may_format) +{ +	int r; + +	md->bm = dm_block_manager_create(md->bdev, DM_ERA_METADATA_BLOCK_SIZE, +					 DM_ERA_METADATA_CACHE_SIZE, +					 ERA_MAX_CONCURRENT_LOCKS); +	if (IS_ERR(md->bm)) { +		DMERR("could not create block manager"); +		return PTR_ERR(md->bm); +	} + +	r = open_or_format_metadata(md, may_format); +	if (r) +		dm_block_manager_destroy(md->bm); + +	return r; +} + +static void destroy_persistent_data_objects(struct era_metadata *md) +{ +	dm_sm_destroy(md->sm); +	dm_tm_destroy(md->tm); +	dm_block_manager_destroy(md->bm); +} + +/* + * This waits until all era_map threads have picked up the new filter. + */ +static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset) +{ +	rcu_assign_pointer(md->current_writeset, new_writeset); +	synchronize_rcu(); +} + +/*---------------------------------------------------------------- + * Writesets get 'digested' into the main era array. + * + * We're using a coroutine here so the worker thread can do the digestion, + * thus avoiding synchronisation of the metadata.  Digesting a whole + * writeset in one go would cause too much latency. 
+ *--------------------------------------------------------------*/ +struct digest { +	uint32_t era; +	unsigned nr_bits, current_bit; +	struct writeset_metadata writeset; +	__le32 value; +	struct dm_disk_bitset info; + +	int (*step)(struct era_metadata *, struct digest *); +}; + +static int metadata_digest_lookup_writeset(struct era_metadata *md, +					   struct digest *d); + +static int metadata_digest_remove_writeset(struct era_metadata *md, +					   struct digest *d) +{ +	int r; +	uint64_t key = d->era; + +	r = dm_btree_remove(&md->writeset_tree_info, md->writeset_tree_root, +			    &key, &md->writeset_tree_root); +	if (r) { +		DMERR("%s: dm_btree_remove failed", __func__); +		return r; +	} + +	d->step = metadata_digest_lookup_writeset; +	return 0; +} + +#define INSERTS_PER_STEP 100 + +static int metadata_digest_transcribe_writeset(struct era_metadata *md, +					       struct digest *d) +{ +	int r; +	bool marked; +	unsigned b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits); + +	for (b = d->current_bit; b < e; b++) { +		r = writeset_marked_on_disk(&d->info, &d->writeset, b, &marked); +		if (r) { +			DMERR("%s: writeset_marked_on_disk failed", __func__); +			return r; +		} + +		if (!marked) +			continue; + +		__dm_bless_for_disk(&d->value); +		r = dm_array_set_value(&md->era_array_info, md->era_array_root, +				       b, &d->value, &md->era_array_root); +		if (r) { +			DMERR("%s: dm_array_set_value failed", __func__); +			return r; +		} +	} + +	if (b == d->nr_bits) +		d->step = metadata_digest_remove_writeset; +	else +		d->current_bit = b; + +	return 0; +} + +static int metadata_digest_lookup_writeset(struct era_metadata *md, +					   struct digest *d) +{ +	int r; +	uint64_t key; +	struct writeset_disk disk; + +	r = dm_btree_find_lowest_key(&md->writeset_tree_info, +				     md->writeset_tree_root, &key); +	if (r < 0) +		return r; + +	d->era = key; + +	r = dm_btree_lookup(&md->writeset_tree_info, +			    md->writeset_tree_root, &key, &disk); +	if (r) { +		if (r == -ENODATA) { +			d->step = NULL; +			return 0; +		} + +		DMERR("%s: dm_btree_lookup failed", __func__); +		return r; +	} + +	ws_unpack(&disk, &d->writeset); +	d->value = cpu_to_le32(key); + +	d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks); +	d->current_bit = 0; +	d->step = metadata_digest_transcribe_writeset; + +	return 0; +} + +static int metadata_digest_start(struct era_metadata *md, struct digest *d) +{ +	if (d->step) +		return 0; + +	memset(d, 0, sizeof(*d)); + +	/* +	 * We initialise another bitset info to avoid any caching side +	 * effects with the previous one. +	 */ +	dm_disk_bitset_init(md->tm, &d->info); +	d->step = metadata_digest_lookup_writeset; + +	return 0; +} + +/*---------------------------------------------------------------- + * High level metadata interface.  Target methods should use these, and not + * the lower level ones. 
+ *--------------------------------------------------------------*/ +static struct era_metadata *metadata_open(struct block_device *bdev, +					  sector_t block_size, +					  bool may_format) +{ +	int r; +	struct era_metadata *md = kzalloc(sizeof(*md), GFP_KERNEL); + +	if (!md) +		return NULL; + +	md->bdev = bdev; +	md->block_size = block_size; + +	md->writesets[0].md.root = INVALID_WRITESET_ROOT; +	md->writesets[1].md.root = INVALID_WRITESET_ROOT; +	md->current_writeset = &md->writesets[0]; + +	r = create_persistent_data_objects(md, may_format); +	if (r) { +		kfree(md); +		return ERR_PTR(r); +	} + +	return md; +} + +static void metadata_close(struct era_metadata *md) +{ +	destroy_persistent_data_objects(md); +	kfree(md); +} + +static bool valid_nr_blocks(dm_block_t n) +{ +	/* +	 * dm_bitset restricts us to 2^32.  test_bit & co. restrict us +	 * further to 2^31 - 1 +	 */ +	return n < (1ull << 31); +} + +static int metadata_resize(struct era_metadata *md, void *arg) +{ +	int r; +	dm_block_t *new_size = arg; +	__le32 value; + +	if (!valid_nr_blocks(*new_size)) { +		DMERR("Invalid number of origin blocks %llu", +		      (unsigned long long) *new_size); +		return -EINVAL; +	} + +	writeset_free(&md->writesets[0]); +	writeset_free(&md->writesets[1]); + +	r = writeset_alloc(&md->writesets[0], *new_size); +	if (r) { +		DMERR("%s: writeset_alloc failed for writeset 0", __func__); +		return r; +	} + +	r = writeset_alloc(&md->writesets[1], *new_size); +	if (r) { +		DMERR("%s: writeset_alloc failed for writeset 1", __func__); +		return r; +	} + +	value = cpu_to_le32(0u); +	__dm_bless_for_disk(&value); +	r = dm_array_resize(&md->era_array_info, md->era_array_root, +			    md->nr_blocks, *new_size, +			    &value, &md->era_array_root); +	if (r) { +		DMERR("%s: dm_array_resize failed", __func__); +		return r; +	} + +	md->nr_blocks = *new_size; +	return 0; +} + +static int metadata_era_archive(struct era_metadata *md) +{ +	int r; +	uint64_t keys[1]; +	struct writeset_disk value; + +	r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root, +			    &md->current_writeset->md.root); +	if (r) { +		DMERR("%s: dm_bitset_flush failed", __func__); +		return r; +	} + +	ws_pack(&md->current_writeset->md, &value); +	md->current_writeset->md.root = INVALID_WRITESET_ROOT; + +	keys[0] = md->current_era; +	__dm_bless_for_disk(&value); +	r = dm_btree_insert(&md->writeset_tree_info, md->writeset_tree_root, +			    keys, &value, &md->writeset_tree_root); +	if (r) { +		DMERR("%s: couldn't insert writeset into btree", __func__); +		/* FIXME: fail mode */ +		return r; +	} + +	md->archived_writesets = true; + +	return 0; +} + +static struct writeset *next_writeset(struct era_metadata *md) +{ +	return (md->current_writeset == &md->writesets[0]) ? +		&md->writesets[1] : &md->writesets[0]; +} + +static int metadata_new_era(struct era_metadata *md) +{ +	int r; +	struct writeset *new_writeset = next_writeset(md); + +	r = writeset_init(&md->bitset_info, new_writeset); +	if (r) { +		DMERR("%s: writeset_init failed", __func__); +		return r; +	} + +	swap_writeset(md, new_writeset); +	md->current_era++; + +	return 0; +} + +static int metadata_era_rollover(struct era_metadata *md) +{ +	int r; + +	if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) { +		r = metadata_era_archive(md); +		if (r) { +			DMERR("%s: metadata_archive_era failed", __func__); +			/* FIXME: fail mode? 
*/ +			return r; +		} +	} + +	r = metadata_new_era(md); +	if (r) { +		DMERR("%s: new era failed", __func__); +		/* FIXME: fail mode */ +		return r; +	} + +	return 0; +} + +static bool metadata_current_marked(struct era_metadata *md, dm_block_t block) +{ +	bool r; +	struct writeset *ws; + +	rcu_read_lock(); +	ws = rcu_dereference(md->current_writeset); +	r = writeset_marked(ws, block); +	rcu_read_unlock(); + +	return r; +} + +static int metadata_commit(struct era_metadata *md) +{ +	int r; +	struct dm_block *sblock; + +	if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) { +		r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root, +				    &md->current_writeset->md.root); +		if (r) { +			DMERR("%s: bitset flush failed", __func__); +			return r; +		} +	} + +	r = save_sm_root(md); +	if (r) { +		DMERR("%s: save_sm_root failed", __func__); +		return r; +	} + +	r = dm_tm_pre_commit(md->tm); +	if (r) { +		DMERR("%s: pre commit failed", __func__); +		return r; +	} + +	r = superblock_lock(md, &sblock); +	if (r) { +		DMERR("%s: superblock lock failed", __func__); +		return r; +	} + +	prepare_superblock(md, dm_block_data(sblock)); + +	return dm_tm_commit(md->tm, sblock); +} + +static int metadata_checkpoint(struct era_metadata *md) +{ +	/* +	 * For now we just rollover, but later I want to put a check in to +	 * avoid this if the filter is still pretty fresh. +	 */ +	return metadata_era_rollover(md); +} + +/* + * Metadata snapshots allow userland to access era data. + */ +static int metadata_take_snap(struct era_metadata *md) +{ +	int r, inc; +	struct dm_block *clone; + +	if (md->metadata_snap != SUPERBLOCK_LOCATION) { +		DMERR("%s: metadata snapshot already exists", __func__); +		return -EINVAL; +	} + +	r = metadata_era_rollover(md); +	if (r) { +		DMERR("%s: era rollover failed", __func__); +		return r; +	} + +	r = metadata_commit(md); +	if (r) { +		DMERR("%s: pre commit failed", __func__); +		return r; +	} + +	r = dm_sm_inc_block(md->sm, SUPERBLOCK_LOCATION); +	if (r) { +		DMERR("%s: couldn't increment superblock", __func__); +		return r; +	} + +	r = dm_tm_shadow_block(md->tm, SUPERBLOCK_LOCATION, +			       &sb_validator, &clone, &inc); +	if (r) { +		DMERR("%s: couldn't shadow superblock", __func__); +		dm_sm_dec_block(md->sm, SUPERBLOCK_LOCATION); +		return r; +	} +	BUG_ON(!inc); + +	r = dm_sm_inc_block(md->sm, md->writeset_tree_root); +	if (r) { +		DMERR("%s: couldn't inc writeset tree root", __func__); +		dm_tm_unlock(md->tm, clone); +		return r; +	} + +	r = dm_sm_inc_block(md->sm, md->era_array_root); +	if (r) { +		DMERR("%s: couldn't inc era tree root", __func__); +		dm_sm_dec_block(md->sm, md->writeset_tree_root); +		dm_tm_unlock(md->tm, clone); +		return r; +	} + +	md->metadata_snap = dm_block_location(clone); + +	r = dm_tm_unlock(md->tm, clone); +	if (r) { +		DMERR("%s: couldn't unlock clone", __func__); +		md->metadata_snap = SUPERBLOCK_LOCATION; +		return r; +	} + +	return 0; +} + +static int metadata_drop_snap(struct era_metadata *md) +{ +	int r; +	dm_block_t location; +	struct dm_block *clone; +	struct superblock_disk *disk; + +	if (md->metadata_snap == SUPERBLOCK_LOCATION) { +		DMERR("%s: no snap to drop", __func__); +		return -EINVAL; +	} + +	r = dm_tm_read_lock(md->tm, md->metadata_snap, &sb_validator, &clone); +	if (r) { +		DMERR("%s: couldn't read lock superblock clone", __func__); +		return r; +	} + +	/* +	 * Whatever happens now we'll commit with no record of the metadata +	 * snap. 
+	 */ +	md->metadata_snap = SUPERBLOCK_LOCATION; + +	disk = dm_block_data(clone); +	r = dm_btree_del(&md->writeset_tree_info, +			 le64_to_cpu(disk->writeset_tree_root)); +	if (r) { +		DMERR("%s: error deleting writeset tree clone", __func__); +		dm_tm_unlock(md->tm, clone); +		return r; +	} + +	r = dm_array_del(&md->era_array_info, le64_to_cpu(disk->era_array_root)); +	if (r) { +		DMERR("%s: error deleting era array clone", __func__); +		dm_tm_unlock(md->tm, clone); +		return r; +	} + +	location = dm_block_location(clone); +	dm_tm_unlock(md->tm, clone); + +	return dm_sm_dec_block(md->sm, location); +} + +struct metadata_stats { +	dm_block_t used; +	dm_block_t total; +	dm_block_t snap; +	uint32_t era; +}; + +static int metadata_get_stats(struct era_metadata *md, void *ptr) +{ +	int r; +	struct metadata_stats *s = ptr; +	dm_block_t nr_free, nr_total; + +	r = dm_sm_get_nr_free(md->sm, &nr_free); +	if (r) { +		DMERR("dm_sm_get_nr_free returned %d", r); +		return r; +	} + +	r = dm_sm_get_nr_blocks(md->sm, &nr_total); +	if (r) { +		DMERR("dm_pool_get_metadata_dev_size returned %d", r); +		return r; +	} + +	s->used = nr_total - nr_free; +	s->total = nr_total; +	s->snap = md->metadata_snap; +	s->era = md->current_era; + +	return 0; +} + +/*----------------------------------------------------------------*/ + +struct era { +	struct dm_target *ti; +	struct dm_target_callbacks callbacks; + +	struct dm_dev *metadata_dev; +	struct dm_dev *origin_dev; + +	dm_block_t nr_blocks; +	uint32_t sectors_per_block; +	int sectors_per_block_shift; +	struct era_metadata *md; + +	struct workqueue_struct *wq; +	struct work_struct worker; + +	spinlock_t deferred_lock; +	struct bio_list deferred_bios; + +	spinlock_t rpc_lock; +	struct list_head rpc_calls; + +	struct digest digest; +	atomic_t suspended; +}; + +struct rpc { +	struct list_head list; + +	int (*fn0)(struct era_metadata *); +	int (*fn1)(struct era_metadata *, void *); +	void *arg; +	int result; + +	struct completion complete; +}; + +/*---------------------------------------------------------------- + * Remapping. 
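+ * get_block() below turns a bio's start sector into an era block number; + * for example, with 128-sector (64KiB) blocks a bio at sector 1000 falls + * into block 7 (1000 >> 7).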
+ *---------------------------------------------------------------*/ +static bool block_size_is_power_of_two(struct era *era) +{ +	return era->sectors_per_block_shift >= 0; +} + +static dm_block_t get_block(struct era *era, struct bio *bio) +{ +	sector_t block_nr = bio->bi_iter.bi_sector; + +	if (!block_size_is_power_of_two(era)) +		(void) sector_div(block_nr, era->sectors_per_block); +	else +		block_nr >>= era->sectors_per_block_shift; + +	return block_nr; +} + +static void remap_to_origin(struct era *era, struct bio *bio) +{ +	bio->bi_bdev = era->origin_dev->bdev; +} + +/*---------------------------------------------------------------- + * Worker thread + *--------------------------------------------------------------*/ +static void wake_worker(struct era *era) +{ +	if (!atomic_read(&era->suspended)) +		queue_work(era->wq, &era->worker); +} + +static void process_old_eras(struct era *era) +{ +	int r; + +	if (!era->digest.step) +		return; + +	r = era->digest.step(era->md, &era->digest); +	if (r < 0) { +		DMERR("%s: digest step failed, stopping digestion", __func__); +		era->digest.step = NULL; + +	} else if (era->digest.step) +		wake_worker(era); +} + +static void process_deferred_bios(struct era *era) +{ +	int r; +	struct bio_list deferred_bios, marked_bios; +	struct bio *bio; +	bool commit_needed = false; +	bool failed = false; + +	bio_list_init(&deferred_bios); +	bio_list_init(&marked_bios); + +	spin_lock(&era->deferred_lock); +	bio_list_merge(&deferred_bios, &era->deferred_bios); +	bio_list_init(&era->deferred_bios); +	spin_unlock(&era->deferred_lock); + +	while ((bio = bio_list_pop(&deferred_bios))) { +		r = writeset_test_and_set(&era->md->bitset_info, +					  era->md->current_writeset, +					  get_block(era, bio)); +		if (r < 0) { +			/* +			 * This is bad news, we need to rollback. +			 * FIXME: finish. +			 */ +			failed = true; + +		} else if (r == 0) +			commit_needed = true; + +		bio_list_add(&marked_bios, bio); +	} + +	if (commit_needed) { +		r = metadata_commit(era->md); +		if (r) +			failed = true; +	} + +	if (failed) +		while ((bio = bio_list_pop(&marked_bios))) +			bio_io_error(bio); +	else +		while ((bio = bio_list_pop(&marked_bios))) +			generic_make_request(bio); +} + +static void process_rpc_calls(struct era *era) +{ +	int r; +	bool need_commit = false; +	struct list_head calls; +	struct rpc *rpc, *tmp; + +	INIT_LIST_HEAD(&calls); +	spin_lock(&era->rpc_lock); +	list_splice_init(&era->rpc_calls, &calls); +	spin_unlock(&era->rpc_lock); + +	list_for_each_entry_safe(rpc, tmp, &calls, list) { +		rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg); +		need_commit = true; +	} + +	if (need_commit) { +		r = metadata_commit(era->md); +		if (r) +			list_for_each_entry_safe(rpc, tmp, &calls, list) +				rpc->result = r; +	} + +	list_for_each_entry_safe(rpc, tmp, &calls, list) +		complete(&rpc->complete); +} + +static void kick_off_digest(struct era *era) +{ +	if (era->md->archived_writesets) { +		era->md->archived_writesets = false; +		metadata_digest_start(era->md, &era->digest); +	} +} + +static void do_work(struct work_struct *ws) +{ +	struct era *era = container_of(ws, struct era, worker); + +	kick_off_digest(era); +	process_old_eras(era); +	process_deferred_bios(era); +	process_rpc_calls(era); +} + +static void defer_bio(struct era *era, struct bio *bio) +{ +	spin_lock(&era->deferred_lock); +	bio_list_add(&era->deferred_bios, bio); +	spin_unlock(&era->deferred_lock); + +	wake_worker(era); +} + +/* + * Make an rpc call to the worker to change the metadata. 
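+ * The request is queued on rpc_calls, the worker is woken, and the caller + * waits on a completion until process_rpc_calls() has run the function and + * committed the metadata.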
+ */ +static int perform_rpc(struct era *era, struct rpc *rpc) +{ +	rpc->result = 0; +	init_completion(&rpc->complete); + +	spin_lock(&era->rpc_lock); +	list_add(&rpc->list, &era->rpc_calls); +	spin_unlock(&era->rpc_lock); + +	wake_worker(era); +	wait_for_completion(&rpc->complete); + +	return rpc->result; +} + +static int in_worker0(struct era *era, int (*fn)(struct era_metadata *)) +{ +	struct rpc rpc; +	rpc.fn0 = fn; +	rpc.fn1 = NULL; + +	return perform_rpc(era, &rpc); +} + +static int in_worker1(struct era *era, +		      int (*fn)(struct era_metadata *, void *), void *arg) +{ +	struct rpc rpc; +	rpc.fn0 = NULL; +	rpc.fn1 = fn; +	rpc.arg = arg; + +	return perform_rpc(era, &rpc); +} + +static void start_worker(struct era *era) +{ +	atomic_set(&era->suspended, 0); +} + +static void stop_worker(struct era *era) +{ +	atomic_set(&era->suspended, 1); +	flush_workqueue(era->wq); +} + +/*---------------------------------------------------------------- + * Target methods + *--------------------------------------------------------------*/ +static int dev_is_congested(struct dm_dev *dev, int bdi_bits) +{ +	struct request_queue *q = bdev_get_queue(dev->bdev); +	return bdi_congested(&q->backing_dev_info, bdi_bits); +} + +static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits) +{ +	struct era *era = container_of(cb, struct era, callbacks); +	return dev_is_congested(era->origin_dev, bdi_bits); +} + +static void era_destroy(struct era *era) +{ +	if (era->md) +		metadata_close(era->md); + +	if (era->wq) +		destroy_workqueue(era->wq); + +	if (era->origin_dev) +		dm_put_device(era->ti, era->origin_dev); + +	if (era->metadata_dev) +		dm_put_device(era->ti, era->metadata_dev); + +	kfree(era); +} + +static dm_block_t calc_nr_blocks(struct era *era) +{ +	return dm_sector_div_up(era->ti->len, era->sectors_per_block); +} + +static bool valid_block_size(dm_block_t block_size) +{ +	bool greater_than_zero = block_size > 0; +	bool multiple_of_min_block_size = (block_size & (MIN_BLOCK_SIZE - 1)) == 0; + +	return greater_than_zero && multiple_of_min_block_size; +} + +/* + * <metadata dev> <data dev> <data block size (sectors)> + */ +static int era_ctr(struct dm_target *ti, unsigned argc, char **argv) +{ +	int r; +	char dummy; +	struct era *era; +	struct era_metadata *md; + +	if (argc != 3) { +		ti->error = "Invalid argument count"; +		return -EINVAL; +	} + +	era = kzalloc(sizeof(*era), GFP_KERNEL); +	if (!era) { +		ti->error = "Error allocating era structure"; +		return -ENOMEM; +	} + +	era->ti = ti; + +	r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev); +	if (r) { +		ti->error = "Error opening metadata device"; +		era_destroy(era); +		return -EINVAL; +	} + +	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev); +	if (r) { +		ti->error = "Error opening data device"; +		era_destroy(era); +		return -EINVAL; +	} + +	r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy); +	if (r != 1) { +		ti->error = "Error parsing block size"; +		era_destroy(era); +		return -EINVAL; +	} + +	r = dm_set_target_max_io_len(ti, era->sectors_per_block); +	if (r) { +		ti->error = "could not set max io len"; +		era_destroy(era); +		return -EINVAL; +	} + +	if (!valid_block_size(era->sectors_per_block)) { +		ti->error = "Invalid block size"; +		era_destroy(era); +		return -EINVAL; +	} +	if (era->sectors_per_block & (era->sectors_per_block - 1)) +		era->sectors_per_block_shift = -1; +	else +		era->sectors_per_block_shift = __ffs(era->sectors_per_block); + +	md = 
metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true); +	if (IS_ERR(md)) { +		ti->error = "Error reading metadata"; +		era_destroy(era); +		return PTR_ERR(md); +	} +	era->md = md; + +	era->nr_blocks = calc_nr_blocks(era); + +	r = metadata_resize(era->md, &era->nr_blocks); +	if (r) { +		ti->error = "couldn't resize metadata"; +		era_destroy(era); +		return -ENOMEM; +	} + +	era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); +	if (!era->wq) { +		ti->error = "could not create workqueue for metadata object"; +		era_destroy(era); +		return -ENOMEM; +	} +	INIT_WORK(&era->worker, do_work); + +	spin_lock_init(&era->deferred_lock); +	bio_list_init(&era->deferred_bios); + +	spin_lock_init(&era->rpc_lock); +	INIT_LIST_HEAD(&era->rpc_calls); + +	ti->private = era; +	ti->num_flush_bios = 1; +	ti->flush_supported = true; + +	ti->num_discard_bios = 1; +	ti->discards_supported = true; +	era->callbacks.congested_fn = era_is_congested; +	dm_table_add_target_callbacks(ti->table, &era->callbacks); + +	return 0; +} + +static void era_dtr(struct dm_target *ti) +{ +	era_destroy(ti->private); +} + +static int era_map(struct dm_target *ti, struct bio *bio) +{ +	struct era *era = ti->private; +	dm_block_t block = get_block(era, bio); + +	/* +	 * All bios get remapped to the origin device.  We do this now, but +	 * it may not get issued until later.  Depending on whether the +	 * block is marked in this era. +	 */ +	remap_to_origin(era, bio); + +	/* +	 * REQ_FLUSH bios carry no data, so we're not interested in them. +	 */ +	if (!(bio->bi_rw & REQ_FLUSH) && +	    (bio_data_dir(bio) == WRITE) && +	    !metadata_current_marked(era->md, block)) { +		defer_bio(era, bio); +		return DM_MAPIO_SUBMITTED; +	} + +	return DM_MAPIO_REMAPPED; +} + +static void era_postsuspend(struct dm_target *ti) +{ +	int r; +	struct era *era = ti->private; + +	r = in_worker0(era, metadata_era_archive); +	if (r) { +		DMERR("%s: couldn't archive current era", __func__); +		/* FIXME: fail mode */ +	} + +	stop_worker(era); +} + +static int era_preresume(struct dm_target *ti) +{ +	int r; +	struct era *era = ti->private; +	dm_block_t new_size = calc_nr_blocks(era); + +	if (era->nr_blocks != new_size) { +		r = in_worker1(era, metadata_resize, &new_size); +		if (r) +			return r; + +		era->nr_blocks = new_size; +	} + +	start_worker(era); + +	r = in_worker0(era, metadata_new_era); +	if (r) { +		DMERR("%s: metadata_era_rollover failed", __func__); +		return r; +	} + +	return 0; +} + +/* + * Status format: + * + * <metadata block size> <#used metadata blocks>/<#total metadata blocks> + * <current era> <held metadata root | '-'> + */ +static void era_status(struct dm_target *ti, status_type_t type, +		       unsigned status_flags, char *result, unsigned maxlen) +{ +	int r; +	struct era *era = ti->private; +	ssize_t sz = 0; +	struct metadata_stats stats; +	char buf[BDEVNAME_SIZE]; + +	switch (type) { +	case STATUSTYPE_INFO: +		r = in_worker1(era, metadata_get_stats, &stats); +		if (r) +			goto err; + +		DMEMIT("%u %llu/%llu %u", +		       (unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT), +		       (unsigned long long) stats.used, +		       (unsigned long long) stats.total, +		       (unsigned) stats.era); + +		if (stats.snap != SUPERBLOCK_LOCATION) +			DMEMIT(" %llu", stats.snap); +		else +			DMEMIT(" -"); +		break; + +	case STATUSTYPE_TABLE: +		format_dev_t(buf, era->metadata_dev->bdev->bd_dev); +		DMEMIT("%s ", buf); +		format_dev_t(buf, era->origin_dev->bdev->bd_dev); +		DMEMIT("%s %u", buf, 
era->sectors_per_block); +		break; +	} + +	return; + +err: +	DMEMIT("Error"); +} + +static int era_message(struct dm_target *ti, unsigned argc, char **argv) +{ +	struct era *era = ti->private; + +	if (argc != 1) { +		DMERR("incorrect number of message arguments"); +		return -EINVAL; +	} + +	if (!strcasecmp(argv[0], "checkpoint")) +		return in_worker0(era, metadata_checkpoint); + +	if (!strcasecmp(argv[0], "take_metadata_snap")) +		return in_worker0(era, metadata_take_snap); + +	if (!strcasecmp(argv[0], "drop_metadata_snap")) +		return in_worker0(era, metadata_drop_snap); + +	DMERR("unsupported message '%s'", argv[0]); +	return -EINVAL; +} + +static sector_t get_dev_size(struct dm_dev *dev) +{ +	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT; +} + +static int era_iterate_devices(struct dm_target *ti, +			       iterate_devices_callout_fn fn, void *data) +{ +	struct era *era = ti->private; +	return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data); +} + +static int era_merge(struct dm_target *ti, struct bvec_merge_data *bvm, +		     struct bio_vec *biovec, int max_size) +{ +	struct era *era = ti->private; +	struct request_queue *q = bdev_get_queue(era->origin_dev->bdev); + +	if (!q->merge_bvec_fn) +		return max_size; + +	bvm->bi_bdev = era->origin_dev->bdev; + +	return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); +} + +static void era_io_hints(struct dm_target *ti, struct queue_limits *limits) +{ +	struct era *era = ti->private; +	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT; + +	/* +	 * If the system-determined stacked limits are compatible with the +	 * era device's blocksize (io_opt is a factor) do not override them. +	 */ +	if (io_opt_sectors < era->sectors_per_block || +	    do_div(io_opt_sectors, era->sectors_per_block)) { +		blk_limits_io_min(limits, 0); +		blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT); +	} +} + +/*----------------------------------------------------------------*/ + +static struct target_type era_target = { +	.name = "era", +	.version = {1, 0, 0}, +	.module = THIS_MODULE, +	.ctr = era_ctr, +	.dtr = era_dtr, +	.map = era_map, +	.postsuspend = era_postsuspend, +	.preresume = era_preresume, +	.status = era_status, +	.message = era_message, +	.iterate_devices = era_iterate_devices, +	.merge = era_merge, +	.io_hints = era_io_hints +}; + +static int __init dm_era_init(void) +{ +	int r; + +	r = dm_register_target(&era_target); +	if (r) { +		DMERR("era target registration failed: %d", r); +		return r; +	} + +	return 0; +} + +static void __exit dm_era_exit(void) +{ +	dm_unregister_target(&era_target); +} + +module_init(dm_era_init); +module_exit(dm_era_exit); + +MODULE_DESCRIPTION(DM_NAME " era target"); +MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index c80a0ec5f12..b257e46876d 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -248,7 +248,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)  	bio->bi_bdev = fc->dev->bdev;  	if (bio_sectors(bio)) -		bio->bi_sector = flakey_map_sector(ti, bio->bi_sector); +		bio->bi_iter.bi_sector = +			flakey_map_sector(ti, bio->bi_iter.bi_sector);  }  static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) @@ -265,8 +266,8 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)  		DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "  			"(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",  			bio, fc->corrupt_bio_value, 
fc->corrupt_bio_byte, -			(bio_data_dir(bio) == WRITE) ? 'w' : 'r', -			bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes); +			(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw, +			(unsigned long long)bio->bi_iter.bi_sector, bio_bytes);  	}  } diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index ea49834377c..db404a0f7e2 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -10,6 +10,7 @@  #include <linux/device-mapper.h>  #include <linux/bio.h> +#include <linux/completion.h>  #include <linux/mempool.h>  #include <linux/module.h>  #include <linux/sched.h> @@ -19,8 +20,6 @@  #define DM_MSG_PREFIX "io"  #define DM_IO_MAX_REGIONS	BITS_PER_LONG -#define MIN_IOS		16 -#define MIN_BIOS	16  struct dm_io_client {  	mempool_t *pool; @@ -34,7 +33,7 @@ struct dm_io_client {  struct io {  	unsigned long error_bits;  	atomic_t count; -	struct task_struct *sleeper; +	struct completion *wait;  	struct dm_io_client *client;  	io_notify_fn callback;  	void *context; @@ -50,16 +49,17 @@ static struct kmem_cache *_dm_io_cache;  struct dm_io_client *dm_io_client_create(void)  {  	struct dm_io_client *client; +	unsigned min_ios = dm_get_reserved_bio_based_ios();  	client = kmalloc(sizeof(*client), GFP_KERNEL);  	if (!client)  		return ERR_PTR(-ENOMEM); -	client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache); +	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);  	if (!client->pool)  		goto bad; -	client->bios = bioset_create(MIN_BIOS, 0); +	client->bios = bioset_create(min_ios, 0);  	if (!client->bios)  		goto bad; @@ -122,8 +122,8 @@ static void dec_count(struct io *io, unsigned int region, int error)  			invalidate_kernel_vmap_range(io->vma_invalidate_address,  						     io->vma_invalidate_size); -		if (io->sleeper) -			wake_up_process(io->sleeper); +		if (io->wait) +			complete(io->wait);  		else {  			unsigned long r = io->error_bits; @@ -202,26 +202,28 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse  /*   * Functions for getting the pages from a bvec.   
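 * After this change the pages actually come from a bio: context_ptr walks  * the bio's bvecs and context_u carries bi_iter.bi_bvec_done, the bytes  * already consumed from the first bvec.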
*/ -static void bvec_get_page(struct dpages *dp, -		  struct page **p, unsigned long *len, unsigned *offset) +static void bio_get_page(struct dpages *dp, struct page **p, +			 unsigned long *len, unsigned *offset)  { -	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr; +	struct bio_vec *bvec = dp->context_ptr;  	*p = bvec->bv_page; -	*len = bvec->bv_len; -	*offset = bvec->bv_offset; +	*len = bvec->bv_len - dp->context_u; +	*offset = bvec->bv_offset + dp->context_u;  } -static void bvec_next_page(struct dpages *dp) +static void bio_next_page(struct dpages *dp)  { -	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr; +	struct bio_vec *bvec = dp->context_ptr;  	dp->context_ptr = bvec + 1; +	dp->context_u = 0;  } -static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec) +static void bio_dp_init(struct dpages *dp, struct bio *bio)  { -	dp->get_page = bvec_get_page; -	dp->next_page = bvec_next_page; -	dp->context_ptr = bvec; +	dp->get_page = bio_get_page; +	dp->next_page = bio_next_page; +	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); +	dp->context_u = bio->bi_iter.bi_bvec_done;  }  /* @@ -305,14 +307,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,  					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));  		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); -		bio->bi_sector = where->sector + (where->count - remaining); +		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);  		bio->bi_bdev = where->bdev;  		bio->bi_end_io = endio;  		store_io_and_region_in_bio(bio, io, region);  		if (rw & REQ_DISCARD) {  			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); -			bio->bi_size = num_sectors << SECTOR_SHIFT; +			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;  			remaining -= num_sectors;  		} else if (rw & REQ_WRITE_SAME) {  			/* @@ -321,7 +323,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,  			dp->get_page(dp, &page, &len, &offset);  			bio_add_page(bio, page, logical_block_size, offset);  			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); -			bio->bi_size = num_sectors << SECTOR_SHIFT; +			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;  			offset = 0;  			remaining -= num_sectors; @@ -386,6 +388,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,  	 */  	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];  	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io)); +	DECLARE_COMPLETION_ONSTACK(wait);  	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {  		WARN_ON(1); @@ -394,7 +397,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,  	io->error_bits = 0;  	atomic_set(&io->count, 1); /* see dispatch_io() */ -	io->sleeper = current; +	io->wait = &wait;  	io->client = client;  	io->vma_invalidate_address = dp->vma_invalidate_address; @@ -402,15 +405,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,  	dispatch_io(rw, num_regions, where, dp, io, 1); -	while (1) { -		set_current_state(TASK_UNINTERRUPTIBLE); - -		if (!atomic_read(&io->count)) -			break; - -		io_schedule(); -	} -	set_current_state(TASK_RUNNING); +	wait_for_completion_io(&wait);  	if (error_bits)  		*error_bits = io->error_bits; @@ -433,7 +428,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,  	io = mempool_alloc(client->pool, GFP_NOIO);  	io->error_bits = 0;  	atomic_set(&io->count, 1); /* see 
dispatch_io() */ -	io->sleeper = NULL; +	io->wait = NULL;  	io->client = client;  	io->callback = fn;  	io->context = context; @@ -458,8 +453,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,  		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);  		break; -	case DM_IO_BVEC: -		bvec_dp_init(dp, io_req->mem.ptr.bvec); +	case DM_IO_BIO: +		bio_dp_init(dp, io_req->mem.ptr.bio);  		break;  	case DM_IO_VMA: diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index afe08146f73..51521429fb5 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -57,7 +57,7 @@ struct vers_iter {  static struct list_head _name_buckets[NUM_BUCKETS];  static struct list_head _uuid_buckets[NUM_BUCKETS]; -static void dm_hash_remove_all(int keep_open_devices); +static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred);  /*   * Guards access to both hash tables. @@ -86,7 +86,7 @@ static int dm_hash_init(void)  static void dm_hash_exit(void)  { -	dm_hash_remove_all(0); +	dm_hash_remove_all(false, false, false);  }  /*----------------------------------------------------------------- @@ -276,7 +276,7 @@ static struct dm_table *__hash_remove(struct hash_cell *hc)  	return table;  } -static void dm_hash_remove_all(int keep_open_devices) +static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred)  {  	int i, dev_skipped;  	struct hash_cell *hc; @@ -293,7 +293,8 @@ retry:  			md = hc->md;  			dm_get(md); -			if (keep_open_devices && dm_lock_for_deletion(md)) { +			if (keep_open_devices && +			    dm_lock_for_deletion(md, mark_deferred, only_deferred)) {  				dm_put(md);  				dev_skipped++;  				continue; @@ -450,6 +451,11 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,  	return md;  } +void dm_deferred_remove(void) +{ +	dm_hash_remove_all(true, false, true); +} +  /*-----------------------------------------------------------------   * Implementation of the ioctl commands   *---------------------------------------------------------------*/ @@ -461,7 +467,7 @@ typedef int (*ioctl_fn)(struct dm_ioctl *param, size_t param_size);  static int remove_all(struct dm_ioctl *param, size_t param_size)  { -	dm_hash_remove_all(1); +	dm_hash_remove_all(true, !!(param->flags & DM_DEFERRED_REMOVE), false);  	param->data_size = 0;  	return 0;  } @@ -683,6 +689,9 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)  	if (dm_suspended_md(md))  		param->flags |= DM_SUSPEND_FLAG; +	if (dm_test_deferred_remove_flag(md)) +		param->flags |= DM_DEFERRED_REMOVE; +  	param->dev = huge_encode_dev(disk_devt(disk));  	/* @@ -832,8 +841,13 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)  	/*  	 * Ensure the device is not open and nothing further can open it.  	 
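	 * With DM_DEFERRED_REMOVE set, a busy device is not treated as an error  	 * here: it is only marked for deferred removal and the ioctl succeeds.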
*/ -	r = dm_lock_for_deletion(md); +	r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false);  	if (r) { +		if (r == -EBUSY && param->flags & DM_DEFERRED_REMOVE) { +			up_write(&_hash_lock); +			dm_put(md); +			return 0; +		}  		DMDEBUG_LIMIT("unable to remove open device %s", hc->name);  		up_write(&_hash_lock);  		dm_put(md); @@ -848,6 +862,8 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)  		dm_table_destroy(t);  	} +	param->flags &= ~DM_DEFERRED_REMOVE; +  	if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))  		param->flags |= DM_UEVENT_GENERATED_FLAG; @@ -1469,6 +1485,14 @@ static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,  	if (**argv != '@')  		return 2; /* no '@' prefix, deliver to target */ +	if (!strcasecmp(argv[0], "@cancel_deferred_remove")) { +		if (argc != 1) { +			DMERR("Invalid arguments for @cancel_deferred_remove"); +			return -EINVAL; +		} +		return dm_cancel_deferred_remove(md); +	} +  	r = dm_stats_message(md, argc, argv, result, maxlen);  	if (r < 2)  		return r; diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 4f99d267340..53e848c1093 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -85,7 +85,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)  	bio->bi_bdev = lc->dev->bdev;  	if (bio_sectors(bio)) -		bio->bi_sector = linear_map_sector(ti, bio->bi_sector); +		bio->bi_iter.bi_sector = +			linear_map_sector(ti, bio->bi_iter.bi_sector);  }  static int linear_map(struct dm_target *ti, struct bio *bio) diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c index 9429159d9ee..b953db6cc22 100644 --- a/drivers/md/dm-log-userspace-base.c +++ b/drivers/md/dm-log-userspace-base.c @@ -10,10 +10,11 @@  #include <linux/device-mapper.h>  #include <linux/dm-log-userspace.h>  #include <linux/module.h> +#include <linux/workqueue.h>  #include "dm-log-userspace-transfer.h" -#define DM_LOG_USERSPACE_VSN "1.1.0" +#define DM_LOG_USERSPACE_VSN "1.3.0"  struct flush_entry {  	int type; @@ -58,6 +59,18 @@ struct log_c {  	spinlock_t flush_lock;  	struct list_head mark_list;  	struct list_head clear_list; + +	/* +	 * Workqueue for flush of clear region requests. +	 */ +	struct workqueue_struct *dmlog_wq; +	struct delayed_work flush_log_work; +	atomic_t sched_flush; + +	/* +	 * Combine userspace flush and mark requests for efficiency. +	 */ +	uint32_t integrated_flush;  };  static mempool_t *flush_entry_pool; @@ -122,6 +135,9 @@ static int build_constructor_string(struct dm_target *ti,  	*ctr_str = NULL; +	/* +	 * Determine overall size of the string. +	 */  	for (i = 0, str_size = 0; i < argc; i++)  		str_size += strlen(argv[i]) + 1; /* +1 for space between args */ @@ -141,18 +157,39 @@ static int build_constructor_string(struct dm_target *ti,  	return str_size;  } +static void do_flush(struct work_struct *work) +{ +	int r; +	struct log_c *lc = container_of(work, struct log_c, flush_log_work.work); + +	atomic_set(&lc->sched_flush, 0); + +	r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, NULL, 0, NULL, NULL); + +	if (r) +		dm_table_event(lc->ti->table); +} +  /*   * userspace_ctr   *   * argv contains: - *	<UUID> <other args> - * Where 'other args' is the userspace implementation specific log - * arguments.  An example might be: - *	<UUID> clustered-disk <arg count> <log dev> <region_size> [[no]sync] + *	<UUID> [integrated_flush] <other args> + * Where 'other args' are the userspace implementation-specific log + * arguments. 
+ * + * Example: + *	<UUID> [integrated_flush] clustered-disk <arg count> <log dev> + *	<region_size> [[no]sync] + * + * This module strips off the <UUID> and uses it for identification + * purposes when communicating with userspace about a log.   * - * So, this module will strip off the <UUID> for identification purposes - * when communicating with userspace about a log; but will pass on everything - * else. + * If integrated_flush is defined, the kernel combines flush + * and mark requests. + * + * The rest of the line, beginning with 'clustered-disk', is passed + * to the userspace ctr function.   */  static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,  			 unsigned argc, char **argv) @@ -188,12 +225,22 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,  		return -EINVAL;  	} +	lc->usr_argc = argc; +  	strncpy(lc->uuid, argv[0], DM_UUID_LEN); +	argc--; +	argv++;  	spin_lock_init(&lc->flush_lock);  	INIT_LIST_HEAD(&lc->mark_list);  	INIT_LIST_HEAD(&lc->clear_list); -	str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str); +	if (!strcasecmp(argv[0], "integrated_flush")) { +		lc->integrated_flush = 1; +		argc--; +		argv++; +	} + +	str_size = build_constructor_string(ti, argc, argv, &ctr_str);  	if (str_size < 0) {  		kfree(lc);  		return str_size; @@ -246,6 +293,19 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,  			DMERR("Failed to register %s with device-mapper",  			      devices_rdata);  	} + +	if (lc->integrated_flush) { +		lc->dmlog_wq = alloc_workqueue("dmlogd", WQ_MEM_RECLAIM, 0); +		if (!lc->dmlog_wq) { +			DMERR("couldn't start dmlogd"); +			r = -ENOMEM; +			goto out; +		} + +		INIT_DELAYED_WORK(&lc->flush_log_work, do_flush); +		atomic_set(&lc->sched_flush, 0); +	} +  out:  	kfree(devices_rdata);  	if (r) { @@ -253,7 +313,6 @@ out:  		kfree(ctr_str);  	} else {  		lc->usr_argv_str = ctr_str; -		lc->usr_argc = argc;  		log->context = lc;  	} @@ -264,9 +323,16 @@ static void userspace_dtr(struct dm_dirty_log *log)  {  	struct log_c *lc = log->context; +	if (lc->integrated_flush) { +		/* flush workqueue */ +		if (atomic_read(&lc->sched_flush)) +			flush_delayed_work(&lc->flush_log_work); + +		destroy_workqueue(lc->dmlog_wq); +	} +  	(void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR, -				 NULL, 0, -				 NULL, NULL); +				    NULL, 0, NULL, NULL);  	if (lc->log_dev)  		dm_put_device(lc->ti, lc->log_dev); @@ -283,8 +349,7 @@ static int userspace_presuspend(struct dm_dirty_log *log)  	struct log_c *lc = log->context;  	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND, -				 NULL, 0, -				 NULL, NULL); +				 NULL, 0, NULL, NULL);  	return r;  } @@ -294,9 +359,14 @@ static int userspace_postsuspend(struct dm_dirty_log *log)  	int r;  	struct log_c *lc = log->context; +	/* +	 * Run planned flush earlier. 
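+	 * If integrated_flush scheduled a delayed flush, make sure it has run +	 * before the log is suspended.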
+	 */ +	if (lc->integrated_flush && atomic_read(&lc->sched_flush)) +		flush_delayed_work(&lc->flush_log_work); +  	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND, -				 NULL, 0, -				 NULL, NULL); +				 NULL, 0, NULL, NULL);  	return r;  } @@ -308,8 +378,7 @@ static int userspace_resume(struct dm_dirty_log *log)  	lc->in_sync_hint = 0;  	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME, -				 NULL, 0, -				 NULL, NULL); +				 NULL, 0, NULL, NULL);  	return r;  } @@ -405,7 +474,8 @@ static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)  	return r;  } -static int flush_by_group(struct log_c *lc, struct list_head *flush_list) +static int flush_by_group(struct log_c *lc, struct list_head *flush_list, +			  int flush_with_payload)  {  	int r = 0;  	int count; @@ -431,15 +501,29 @@ static int flush_by_group(struct log_c *lc, struct list_head *flush_list)  				break;  		} -		r = userspace_do_request(lc, lc->uuid, type, -					 (char *)(group), -					 count * sizeof(uint64_t), -					 NULL, NULL); -		if (r) { -			/* Group send failed.  Attempt one-by-one. */ -			list_splice_init(&tmp_list, flush_list); -			r = flush_one_by_one(lc, flush_list); -			break; +		if (flush_with_payload) { +			r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, +						 (char *)(group), +						 count * sizeof(uint64_t), +						 NULL, NULL); +			/* +			 * Integrated flush failed. +			 */ +			if (r) +				break; +		} else { +			r = userspace_do_request(lc, lc->uuid, type, +						 (char *)(group), +						 count * sizeof(uint64_t), +						 NULL, NULL); +			if (r) { +				/* +				 * Group send failed.  Attempt one-by-one. +				 */ +				list_splice_init(&tmp_list, flush_list); +				r = flush_one_by_one(lc, flush_list); +				break; +			}  		}  	} @@ -476,6 +560,8 @@ static int userspace_flush(struct dm_dirty_log *log)  	struct log_c *lc = log->context;  	LIST_HEAD(mark_list);  	LIST_HEAD(clear_list); +	int mark_list_is_empty; +	int clear_list_is_empty;  	struct flush_entry *fe, *tmp_fe;  	spin_lock_irqsave(&lc->flush_lock, flags); @@ -483,23 +569,51 @@ static int userspace_flush(struct dm_dirty_log *log)  	list_splice_init(&lc->clear_list, &clear_list);  	spin_unlock_irqrestore(&lc->flush_lock, flags); -	if (list_empty(&mark_list) && list_empty(&clear_list)) +	mark_list_is_empty = list_empty(&mark_list); +	clear_list_is_empty = list_empty(&clear_list); + +	if (mark_list_is_empty && clear_list_is_empty)  		return 0; -	r = flush_by_group(lc, &mark_list); +	r = flush_by_group(lc, &clear_list, 0);  	if (r) -		goto fail; +		goto out; -	r = flush_by_group(lc, &clear_list); +	if (!lc->integrated_flush) { +		r = flush_by_group(lc, &mark_list, 0); +		if (r) +			goto out; +		r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, +					 NULL, 0, NULL, NULL); +		goto out; +	} + +	/* +	 * Send integrated flush request with mark_list as payload. +	 */ +	r = flush_by_group(lc, &mark_list, 1);  	if (r) -		goto fail; +		goto out; -	r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, -				 NULL, 0, NULL, NULL); +	if (mark_list_is_empty && !atomic_read(&lc->sched_flush)) { +		/* +		 * When there are only clear region requests, +		 * we schedule a flush in the future. +		 */ +		queue_delayed_work(lc->dmlog_wq, &lc->flush_log_work, 3 * HZ); +		atomic_set(&lc->sched_flush, 1); +	} else { +		/* +		 * Cancel pending flush because we +		 * have already flushed in mark_region. 
+		 */ +		cancel_delayed_work(&lc->flush_log_work); +		atomic_set(&lc->sched_flush, 0); +	} -fail: +out:  	/* -	 * We can safely remove these entries, even if failure. +	 * We can safely remove these entries, even after failure.  	 * Calling code will receive an error and will know that  	 * the log facility has failed.  	 */ @@ -603,8 +717,7 @@ static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region)  	rdata_size = sizeof(pkg);  	r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK, -				 NULL, 0, -				 (char *)&pkg, &rdata_size); +				 NULL, 0, (char *)&pkg, &rdata_size);  	*region = pkg.r;  	return (r) ? r : (int)pkg.i; @@ -630,8 +743,7 @@ static void userspace_set_region_sync(struct dm_dirty_log *log,  	pkg.i = (int64_t)in_sync;  	r = userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC, -				 (char *)&pkg, sizeof(pkg), -				 NULL, NULL); +				 (char *)&pkg, sizeof(pkg), NULL, NULL);  	/*  	 * It would be nice to be able to report failures. @@ -657,8 +769,7 @@ static region_t userspace_get_sync_count(struct dm_dirty_log *log)  	rdata_size = sizeof(sync_count);  	r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT, -				 NULL, 0, -				 (char *)&sync_count, &rdata_size); +				 NULL, 0, (char *)&sync_count, &rdata_size);  	if (r)  		return 0; @@ -685,8 +796,7 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,  	switch (status_type) {  	case STATUSTYPE_INFO:  		r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO, -					 NULL, 0, -					 result, &sz); +					 NULL, 0, result, &sz);  		if (r) {  			sz = 0; @@ -699,8 +809,10 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,  		BUG_ON(!table_args); /* There will always be a ' ' */  		table_args++; -		DMEMIT("%s %u %s %s ", log->type->name, lc->usr_argc, -		       lc->uuid, table_args); +		DMEMIT("%s %u %s ", log->type->name, lc->usr_argc, lc->uuid); +		if (lc->integrated_flush) +			DMEMIT("integrated_flush "); +		DMEMIT("%s ", table_args);  		break;  	}  	return (r) ? 0 : (int)sz; diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c index 08d9a207259..b428c0ae63d 100644 --- a/drivers/md/dm-log-userspace-transfer.c +++ b/drivers/md/dm-log-userspace-transfer.c @@ -66,7 +66,7 @@ static int dm_ulog_sendto_server(struct dm_ulog_request *tfr)  	msg->seq = tfr->seq;  	msg->len = sizeof(struct dm_ulog_request) + tfr->data_size; -	r = cn_netlink_send(msg, 0, gfp_any()); +	r = cn_netlink_send(msg, 0, 0, gfp_any());  	return r;  } diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index b759a127f9c..f4167b013d9 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -7,6 +7,7 @@  #include <linux/device-mapper.h> +#include "dm.h"  #include "dm-path-selector.h"  #include "dm-uevent.h" @@ -86,15 +87,12 @@ struct multipath {  	unsigned queue_if_no_path:1;	/* Queue I/O if last path fails? */  	unsigned saved_queue_if_no_path:1; /* Saved state during suspension */  	unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. 
*/ +	unsigned pg_init_disabled:1;	/* pg_init is not currently allowed */  	unsigned pg_init_retries;	/* Number of times to retry pg_init */  	unsigned pg_init_count;		/* Number of times pg_init called */  	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */ -	unsigned queue_size; -	struct work_struct process_queued_ios; -	struct list_head queued_ios; -  	struct work_struct trigger_event;  	/* @@ -116,14 +114,12 @@ struct dm_mpath_io {  typedef int (*action_fn) (struct pgpath *pgpath); -#define MIN_IOS 256	/* Mempool size */ -  static struct kmem_cache *_mpio_cache;  static struct workqueue_struct *kmultipathd, *kmpath_handlerd; -static void process_queued_ios(struct work_struct *work);  static void trigger_event(struct work_struct *work);  static void activate_path(struct work_struct *work); +static int __pgpath_busy(struct pgpath *pgpath);  /*----------------------------------------------- @@ -190,19 +186,18 @@ static void free_priority_group(struct priority_group *pg,  static struct multipath *alloc_multipath(struct dm_target *ti)  {  	struct multipath *m; +	unsigned min_ios = dm_get_reserved_rq_based_ios();  	m = kzalloc(sizeof(*m), GFP_KERNEL);  	if (m) {  		INIT_LIST_HEAD(&m->priority_groups); -		INIT_LIST_HEAD(&m->queued_ios);  		spin_lock_init(&m->lock);  		m->queue_io = 1;  		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; -		INIT_WORK(&m->process_queued_ios, process_queued_ios);  		INIT_WORK(&m->trigger_event, trigger_event);  		init_waitqueue_head(&m->pg_init_wait);  		mutex_init(&m->work_mutex); -		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); +		m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);  		if (!m->mpio_pool) {  			kfree(m);  			return NULL; @@ -255,13 +250,21 @@ static void clear_mapinfo(struct multipath *m, union map_info *info)   * Path selection   *-----------------------------------------------*/ -static void __pg_init_all_paths(struct multipath *m) +static int __pg_init_all_paths(struct multipath *m)  {  	struct pgpath *pgpath;  	unsigned long pg_init_delay = 0; +	if (m->pg_init_in_progress || m->pg_init_disabled) +		return 0; +  	m->pg_init_count++;  	m->pg_init_required = 0; + +	/* Check here to reset pg_init_required */ +	if (!m->current_pg) +		return 0; +  	if (m->pg_init_delay_retry)  		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?  						 
m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS); @@ -273,6 +276,7 @@ static void __pg_init_all_paths(struct multipath *m)  				       pg_init_delay))  			m->pg_init_in_progress++;  	} +	return m->pg_init_in_progress;  }  static void __switch_pg(struct multipath *m, struct pgpath *pgpath) @@ -364,19 +368,26 @@ failed:   */  static int __must_push_back(struct multipath *m)  { -	return (m->queue_if_no_path != m->saved_queue_if_no_path && -		dm_noflush_suspending(m->ti)); +	return (m->queue_if_no_path || +		(m->queue_if_no_path != m->saved_queue_if_no_path && +		 dm_noflush_suspending(m->ti)));  } -static int map_io(struct multipath *m, struct request *clone, -		  union map_info *map_context, unsigned was_queued) +#define pg_ready(m) (!(m)->queue_io && !(m)->pg_init_required) + +/* + * Map cloned requests + */ +static int multipath_map(struct dm_target *ti, struct request *clone, +			 union map_info *map_context)  { -	int r = DM_MAPIO_REMAPPED; +	struct multipath *m = (struct multipath *) ti->private; +	int r = DM_MAPIO_REQUEUE;  	size_t nr_bytes = blk_rq_bytes(clone);  	unsigned long flags;  	struct pgpath *pgpath;  	struct block_device *bdev; -	struct dm_mpath_io *mpio = map_context->ptr; +	struct dm_mpath_io *mpio;  	spin_lock_irqsave(&m->lock, flags); @@ -387,35 +398,33 @@ static int map_io(struct multipath *m, struct request *clone,  	pgpath = m->current_pgpath; -	if (was_queued) -		m->queue_size--; - -	if ((pgpath && m->queue_io) || -	    (!pgpath && m->queue_if_no_path)) { -		/* Queue for the daemon to resubmit */ -		list_add_tail(&clone->queuelist, &m->queued_ios); -		m->queue_size++; -		if ((m->pg_init_required && !m->pg_init_in_progress) || -		    !m->queue_io) -			queue_work(kmultipathd, &m->process_queued_ios); -		pgpath = NULL; -		r = DM_MAPIO_SUBMITTED; -	} else if (pgpath) { -		bdev = pgpath->path.dev->bdev; -		clone->q = bdev_get_queue(bdev); -		clone->rq_disk = bdev->bd_disk; -	} else if (__must_push_back(m)) -		r = DM_MAPIO_REQUEUE; -	else -		r = -EIO;	/* Failed */ +	if (!pgpath) { +		if (!__must_push_back(m)) +			r = -EIO;	/* Failed */ +		goto out_unlock; +	} +	if (!pg_ready(m)) { +		__pg_init_all_paths(m); +		goto out_unlock; +	} +	if (set_mapinfo(m, map_context) < 0) +		/* ENOMEM, requeue */ +		goto out_unlock; +	bdev = pgpath->path.dev->bdev; +	clone->q = bdev_get_queue(bdev); +	clone->rq_disk = bdev->bd_disk; +	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; +	mpio = map_context->ptr;  	mpio->pgpath = pgpath;  	mpio->nr_bytes = nr_bytes; - -	if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io) -		pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path, +	if (pgpath->pg->ps.type->start_io) +		pgpath->pg->ps.type->start_io(&pgpath->pg->ps, +					      &pgpath->path,  					      nr_bytes); +	r = DM_MAPIO_REMAPPED; +out_unlock:  	spin_unlock_irqrestore(&m->lock, flags);  	return r; @@ -436,73 +445,12 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,  	else  		m->saved_queue_if_no_path = queue_if_no_path;  	m->queue_if_no_path = queue_if_no_path; -	if (!m->queue_if_no_path && m->queue_size) -		queue_work(kmultipathd, &m->process_queued_ios); -  	spin_unlock_irqrestore(&m->lock, flags); -	return 0; -} - -/*----------------------------------------------------------------- - * The multipath daemon is responsible for resubmitting queued ios. 
- *---------------------------------------------------------------*/ - -static void dispatch_queued_ios(struct multipath *m) -{ -	int r; -	unsigned long flags; -	union map_info *info; -	struct request *clone, *n; -	LIST_HEAD(cl); - -	spin_lock_irqsave(&m->lock, flags); -	list_splice_init(&m->queued_ios, &cl); -	spin_unlock_irqrestore(&m->lock, flags); +	if (!queue_if_no_path) +		dm_table_run_md_queue_async(m->ti->table); -	list_for_each_entry_safe(clone, n, &cl, queuelist) { -		list_del_init(&clone->queuelist); - -		info = dm_get_rq_mapinfo(clone); - -		r = map_io(m, clone, info, 1); -		if (r < 0) { -			clear_mapinfo(m, info); -			dm_kill_unmapped_request(clone, r); -		} else if (r == DM_MAPIO_REMAPPED) -			dm_dispatch_request(clone); -		else if (r == DM_MAPIO_REQUEUE) { -			clear_mapinfo(m, info); -			dm_requeue_unmapped_request(clone); -		} -	} -} - -static void process_queued_ios(struct work_struct *work) -{ -	struct multipath *m = -		container_of(work, struct multipath, process_queued_ios); -	struct pgpath *pgpath = NULL; -	unsigned must_queue = 1; -	unsigned long flags; - -	spin_lock_irqsave(&m->lock, flags); - -	if (!m->current_pgpath) -		__choose_pgpath(m, 0); - -	pgpath = m->current_pgpath; - -	if ((pgpath && !m->queue_io) || -	    (!pgpath && !m->queue_if_no_path)) -		must_queue = 0; - -	if (m->pg_init_required && !m->pg_init_in_progress && pgpath) -		__pg_init_all_paths(m); - -	spin_unlock_irqrestore(&m->lock, flags); -	if (!must_queue) -		dispatch_queued_ios(m); +	return 0;  }  /* @@ -942,10 +890,20 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)  static void flush_multipath_work(struct multipath *m)  { +	unsigned long flags; + +	spin_lock_irqsave(&m->lock, flags); +	m->pg_init_disabled = 1; +	spin_unlock_irqrestore(&m->lock, flags); +  	flush_workqueue(kmpath_handlerd);  	multipath_wait_for_pg_init_completion(m);  	flush_workqueue(kmultipathd);  	flush_work(&m->trigger_event); + +	spin_lock_irqsave(&m->lock, flags); +	m->pg_init_disabled = 0; +	spin_unlock_irqrestore(&m->lock, flags);  }  static void multipath_dtr(struct dm_target *ti) @@ -957,27 +915,6 @@ static void multipath_dtr(struct dm_target *ti)  }  /* - * Map cloned requests - */ -static int multipath_map(struct dm_target *ti, struct request *clone, -			 union map_info *map_context) -{ -	int r; -	struct multipath *m = (struct multipath *) ti->private; - -	if (set_mapinfo(m, map_context) < 0) -		/* ENOMEM, requeue */ -		return DM_MAPIO_REQUEUE; - -	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; -	r = map_io(m, clone, map_context, 0); -	if (r < 0 || r == DM_MAPIO_REQUEUE) -		clear_mapinfo(m, map_context); - -	return r; -} - -/*   * Take a path out of use.   
*/  static int fail_path(struct pgpath *pgpath) @@ -1017,7 +954,7 @@ out:   */  static int reinstate_path(struct pgpath *pgpath)  { -	int r = 0; +	int r = 0, run_queue = 0;  	unsigned long flags;  	struct multipath *m = pgpath->pg->m; @@ -1039,9 +976,9 @@ static int reinstate_path(struct pgpath *pgpath)  	pgpath->is_active = 1; -	if (!m->nr_valid_paths++ && m->queue_size) { +	if (!m->nr_valid_paths++) {  		m->current_pgpath = NULL; -		queue_work(kmultipathd, &m->process_queued_ios); +		run_queue = 1;  	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {  		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))  			m->pg_init_in_progress++; @@ -1054,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath)  out:  	spin_unlock_irqrestore(&m->lock, flags); +	if (run_queue) +		dm_table_run_md_queue_async(m->ti->table);  	return r;  } @@ -1164,7 +1103,7 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)  	spin_lock_irqsave(&m->lock, flags); -	if (m->pg_init_count <= m->pg_init_retries) +	if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)  		m->pg_init_required = 1;  	else  		limit_reached = 1; @@ -1237,11 +1176,12 @@ static void pg_init_done(void *data, int errors)  		/* Activations of other paths are still on going */  		goto out; -	if (!m->pg_init_required) -		m->queue_io = 0; - -	m->pg_init_delay_retry = delay_retry; -	queue_work(kmultipathd, &m->process_queued_ios); +	if (m->pg_init_required) { +		m->pg_init_delay_retry = delay_retry; +		if (__pg_init_all_paths(m)) +			goto out; +	} +	m->queue_io = 0;  	/*  	 * Wake up any thread waiting to suspend. @@ -1257,8 +1197,11 @@ static void activate_path(struct work_struct *work)  	struct pgpath *pgpath =  		container_of(work, struct pgpath, activate_path.work); -	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), -				pg_init_done, pgpath); +	if (pgpath->is_active) +		scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), +				 pg_init_done, pgpath); +	else +		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);  }  static int noretry_error(int error) @@ -1268,6 +1211,7 @@ static int noretry_error(int error)  	case -EREMOTEIO:  	case -EILSEQ:  	case -ENODATA: +	case -ENOSPC:  		return 1;  	} @@ -1408,7 +1352,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type,  	/* Features */  	if (type == STATUSTYPE_INFO) -		DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count); +		DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);  	else {  		DMEMIT("%u ", m->queue_if_no_path +  			      (m->pg_init_retries > 0) * 2 + @@ -1527,7 +1471,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)  	}  	if (argc != 2) { -		DMWARN("Unrecognised multipath message received."); +		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);  		goto out;  	} @@ -1545,7 +1489,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)  	else if (!strcasecmp(argv[0], "fail_path"))  		action = fail_path;  	else { -		DMWARN("Unrecognised multipath message received."); +		DMWARN("Unrecognised multipath message received: %s", argv[0]);  		goto out;  	} @@ -1601,11 +1545,23 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,  	/*  	 * Only pass ioctls through if the device sizes match exactly.  	 
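	 * Also, when the ioctl could not be mapped to a valid path (-ENOTCONN),  	 * path selection and pg_init are retried below and the request queue is  	 * kicked asynchronously rather than via the old process_queued_ios worker.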
*/ -	if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) -		r = scsi_verify_blk_ioctl(NULL, cmd); +	if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) { +		int err = scsi_verify_blk_ioctl(NULL, cmd); +		if (err) +			r = err; +	} -	if (r == -ENOTCONN && !fatal_signal_pending(current)) -		queue_work(kmultipathd, &m->process_queued_ios); +	if (r == -ENOTCONN && !fatal_signal_pending(current)) { +		spin_lock_irqsave(&m->lock, flags); +		if (!m->current_pg) { +			/* Path status changed, redo selection */ +			__choose_pgpath(m, 0); +		} +		if (m->pg_init_required) +			__pg_init_all_paths(m); +		spin_unlock_irqrestore(&m->lock, flags); +		dm_table_run_md_queue_async(m->ti->table); +	}  	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);  } @@ -1655,6 +1611,12 @@ static int multipath_busy(struct dm_target *ti)  	spin_lock_irqsave(&m->lock, flags); +	/* pg_init in progress or no paths available */ +	if (m->pg_init_in_progress || +	    (!m->nr_valid_paths && m->queue_if_no_path)) { +		busy = 1; +		goto out; +	}  	/* Guess which priority_group will be used at next mapping time */  	if (unlikely(!m->current_pgpath && m->next_pg))  		pg = m->next_pg; @@ -1704,7 +1666,7 @@ out:   *---------------------------------------------------------------*/  static struct target_type multipath_target = {  	.name = "multipath", -	.version = {1, 5, 1}, +	.version = {1, 7, 0},  	.module = THIS_MODULE,  	.ctr = multipath_ctr,  	.dtr = multipath_dtr, diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 9584443c561..7dfdb5c746d 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -432,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)  	region_t region = dm_rh_bio_to_region(ms->rh, bio);  	if (log->type->in_sync(log, region, 0)) -		return choose_mirror(ms,  bio->bi_sector) ? 1 : 0; +		return choose_mirror(ms,  bio->bi_iter.bi_sector) ? 1 : 0;  	return 0;  } @@ -442,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)   */  static sector_t map_sector(struct mirror *m, struct bio *bio)  { -	if (unlikely(!bio->bi_size)) +	if (unlikely(!bio->bi_iter.bi_size))  		return 0; -	return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector); +	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);  }  static void map_bio(struct mirror *m, struct bio *bio)  {  	bio->bi_bdev = m->dev->bdev; -	bio->bi_sector = map_sector(m, bio); +	bio->bi_iter.bi_sector = map_sector(m, bio);  }  static void map_region(struct dm_io_region *io, struct mirror *m, @@ -526,8 +526,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio)  	struct dm_io_region io;  	struct dm_io_request io_req = {  		.bi_rw = READ, -		.mem.type = DM_IO_BVEC, -		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, +		.mem.type = DM_IO_BIO, +		.mem.ptr.bio = bio,  		.notify.fn = read_callback,  		.notify.context = bio,  		.client = m->ms->io_client, @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)  		 * We can only read balance if the region is in sync.  		 
*/  		if (likely(region_in_sync(ms, region, 1))) -			m = choose_mirror(ms, bio->bi_sector); +			m = choose_mirror(ms, bio->bi_iter.bi_sector);  		else if (m && atomic_read(&m->error_count))  			m = NULL; @@ -629,8 +629,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)  	struct mirror *m;  	struct dm_io_request io_req = {  		.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA), -		.mem.type = DM_IO_BVEC, -		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, +		.mem.type = DM_IO_BIO, +		.mem.ptr.bio = bio,  		.notify.fn = write_callback,  		.notify.context = bio,  		.client = ms->io_client, @@ -1181,7 +1181,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)  	 * The region is in-sync and we can perform reads directly.  	 * Store enough information so we can retry if it fails.  	 */ -	m = choose_mirror(ms, bio->bi_sector); +	m = choose_mirror(ms, bio->bi_iter.bi_sector);  	if (unlikely(!m))  		return -EIO; @@ -1244,6 +1244,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)  			dm_bio_restore(bd, bio);  			bio_record->details.bi_bdev = NULL; + +			atomic_inc(&bio->bi_remaining); +  			queue_bio(ms, bio, rw);  			return DM_ENDIO_INCOMPLETE;  		} diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 69732e03eb3..b929fd5f498 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c @@ -126,7 +126,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);  region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)  { -	return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin); +	return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector - +				      rh->target_begin);  }  EXPORT_SYMBOL_GPL(dm_rh_bio_to_region); diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 3ac415675b6..d6e88178d22 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -13,10 +13,13 @@  #include <linux/export.h>  #include <linux/slab.h>  #include <linux/dm-io.h> +#include "dm-bufio.h"  #define DM_MSG_PREFIX "persistent snapshot"  #define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */ +#define DM_PREFETCH_CHUNKS		12 +  /*-----------------------------------------------------------------   * Persistent snapshots, by persistent we mean that the snapshot   * will survive a reboot. @@ -256,7 +259,8 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,  	 */  	INIT_WORK_ONSTACK(&req.work, do_metadata);  	queue_work(ps->metadata_wq, &req.work); -	flush_work(&req.work); +	flush_workqueue(ps->metadata_wq); +	destroy_work_on_stack(&req.work);  	return req.result;  } @@ -269,6 +273,14 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)  	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);  } +static void skip_metadata(struct pstore *ps) +{ +	uint32_t stride = ps->exceptions_per_area + 1; +	chunk_t next_free = ps->next_free; +	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS) +		ps->next_free++; +} +  /*   * Read or write a metadata area.  Remembering to skip the first   * chunk which holds the header. @@ -393,17 +405,18 @@ static int write_header(struct pstore *ps)  /*   * Access functions for the disk exceptions, these do the endian conversions.   
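 * The lookup helpers now take an explicit area pointer, so exceptions can  * be read from a dm-bufio buffer during load as well as from ps->area.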
*/ -static struct disk_exception *get_exception(struct pstore *ps, uint32_t index) +static struct disk_exception *get_exception(struct pstore *ps, void *ps_area, +					    uint32_t index)  {  	BUG_ON(index >= ps->exceptions_per_area); -	return ((struct disk_exception *) ps->area) + index; +	return ((struct disk_exception *) ps_area) + index;  } -static void read_exception(struct pstore *ps, +static void read_exception(struct pstore *ps, void *ps_area,  			   uint32_t index, struct core_exception *result)  { -	struct disk_exception *de = get_exception(ps, index); +	struct disk_exception *de = get_exception(ps, ps_area, index);  	/* copy it */  	result->old_chunk = le64_to_cpu(de->old_chunk); @@ -413,7 +426,7 @@ static void read_exception(struct pstore *ps,  static void write_exception(struct pstore *ps,  			    uint32_t index, struct core_exception *e)  { -	struct disk_exception *de = get_exception(ps, index); +	struct disk_exception *de = get_exception(ps, ps->area, index);  	/* copy it */  	de->old_chunk = cpu_to_le64(e->old_chunk); @@ -422,7 +435,7 @@ static void write_exception(struct pstore *ps,  static void clear_exception(struct pstore *ps, uint32_t index)  { -	struct disk_exception *de = get_exception(ps, index); +	struct disk_exception *de = get_exception(ps, ps->area, index);  	/* clear it */  	de->old_chunk = 0; @@ -434,7 +447,7 @@ static void clear_exception(struct pstore *ps, uint32_t index)   * 'full' is filled in to indicate if the area has been   * filled.   */ -static int insert_exceptions(struct pstore *ps, +static int insert_exceptions(struct pstore *ps, void *ps_area,  			     int (*callback)(void *callback_context,  					     chunk_t old, chunk_t new),  			     void *callback_context, @@ -448,7 +461,7 @@ static int insert_exceptions(struct pstore *ps,  	*full = 1;  	for (i = 0; i < ps->exceptions_per_area; i++) { -		read_exception(ps, i, &e); +		read_exception(ps, ps_area, i, &e);  		/*  		 * If the new_chunk is pointing at the start of @@ -485,24 +498,75 @@ static int read_exceptions(struct pstore *ps,  			   void *callback_context)  {  	int r, full = 1; +	struct dm_bufio_client *client; +	chunk_t prefetch_area = 0; + +	client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev, +					ps->store->chunk_size << SECTOR_SHIFT, +					1, 0, NULL, NULL); + +	if (IS_ERR(client)) +		return PTR_ERR(client); + +	/* +	 * Setup for one current buffer + desired readahead buffers. +	 */ +	dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);  	/*  	 * Keeping reading chunks and inserting exceptions until  	 * we find a partially full area.  	 
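	 * Areas are now read through dm-bufio, prefetching up to  	 * DM_PREFETCH_CHUNKS areas ahead of the one currently being parsed.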
*/  	for (ps->current_area = 0; full; ps->current_area++) { -		r = area_io(ps, READ); -		if (r) -			return r; +		struct dm_buffer *bp; +		void *area; +		chunk_t chunk; + +		if (unlikely(prefetch_area < ps->current_area)) +			prefetch_area = ps->current_area; + +		if (DM_PREFETCH_CHUNKS) do { +			chunk_t pf_chunk = area_location(ps, prefetch_area); +			if (unlikely(pf_chunk >= dm_bufio_get_device_size(client))) +				break; +			dm_bufio_prefetch(client, pf_chunk, 1); +			prefetch_area++; +			if (unlikely(!prefetch_area)) +				break; +		} while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS); + +		chunk = area_location(ps, ps->current_area); + +		area = dm_bufio_read(client, chunk, &bp); +		if (unlikely(IS_ERR(area))) { +			r = PTR_ERR(area); +			goto ret_destroy_bufio; +		} -		r = insert_exceptions(ps, callback, callback_context, &full); -		if (r) -			return r; +		r = insert_exceptions(ps, area, callback, callback_context, +				      &full); + +		if (!full) +			memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT); + +		dm_bufio_release(bp); + +		dm_bufio_forget(client, chunk); + +		if (unlikely(r)) +			goto ret_destroy_bufio;  	}  	ps->current_area--; -	return 0; +	skip_metadata(ps); + +	r = 0; + +ret_destroy_bufio: +	dm_bufio_client_destroy(client); + +	return r;  }  static struct pstore *get_info(struct dm_exception_store *store) @@ -616,8 +680,6 @@ static int persistent_prepare_exception(struct dm_exception_store *store,  					struct dm_exception *e)  {  	struct pstore *ps = get_info(store); -	uint32_t stride; -	chunk_t next_free;  	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);  	/* Is there enough room ? */ @@ -630,10 +692,8 @@ static int persistent_prepare_exception(struct dm_exception_store *store,  	 * Move onto the next free pending, making sure to take  	 * into account the location of the metadata chunks.  	 */ -	stride = (ps->exceptions_per_area + 1); -	next_free = ++ps->next_free; -	if (sector_div(next_free, stride) == 1) -		ps->next_free++; +	ps->next_free++; +	skip_metadata(ps);  	atomic_inc(&ps->pending_count);  	return 0; @@ -727,7 +787,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store,  		ps->current_committed = ps->exceptions_per_area;  	} -	read_exception(ps, ps->current_committed - 1, &ce); +	read_exception(ps, ps->area, ps->current_committed - 1, &ce);  	*last_old_chunk = ce.old_chunk;  	*last_new_chunk = ce.new_chunk; @@ -737,8 +797,8 @@ static int persistent_prepare_merge(struct dm_exception_store *store,  	 */  	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;  	     nr_consecutive++) { -		read_exception(ps, ps->current_committed - 1 - nr_consecutive, -			       &ce); +		read_exception(ps, ps->area, +			       ps->current_committed - 1 - nr_consecutive, &ce);  		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||  		    ce.new_chunk != *last_new_chunk - nr_consecutive)  			break; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index c434e5aab2d..5bd2290cfb1 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -66,6 +66,18 @@ struct dm_snapshot {  	atomic_t pending_exceptions_count; +	/* Protected by "lock" */ +	sector_t exception_start_sequence; + +	/* Protected by kcopyd single-threaded callback */ +	sector_t exception_complete_sequence; + +	/* +	 * A list of pending exceptions that completed out of order. +	 * Protected by kcopyd single-threaded callback. 
+	 */ +	struct list_head out_of_order_list; +  	mempool_t *pending_pool;  	struct dm_exception_table pending; @@ -173,6 +185,14 @@ struct dm_snap_pending_exception {  	 */  	int started; +	/* There was copying error. */ +	int copy_error; + +	/* A sequence number, it is used for in-order completion. */ +	sector_t exception_sequence; + +	struct list_head out_of_order_entry; +  	/*  	 * For writing a complete chunk, bypassing the copy.  	 */ @@ -590,12 +610,12 @@ static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,  	return NULL;  } -static struct dm_exception *alloc_completed_exception(void) +static struct dm_exception *alloc_completed_exception(gfp_t gfp)  {  	struct dm_exception *e; -	e = kmem_cache_alloc(exception_cache, GFP_NOIO); -	if (!e) +	e = kmem_cache_alloc(exception_cache, gfp); +	if (!e && gfp == GFP_NOIO)  		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);  	return e; @@ -622,7 +642,7 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)  	struct dm_snapshot *s = pe->snap;  	mempool_free(pe, s->pending_pool); -	smp_mb__before_atomic_dec(); +	smp_mb__before_atomic();  	atomic_dec(&s->pending_exceptions_count);  } @@ -677,7 +697,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)  	struct dm_snapshot *s = context;  	struct dm_exception *e; -	e = alloc_completed_exception(); +	e = alloc_completed_exception(GFP_KERNEL);  	if (!e)  		return -ENOMEM; @@ -725,17 +745,16 @@ static int calc_max_buckets(void)   */  static int init_hash_tables(struct dm_snapshot *s)  { -	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets; +	sector_t hash_size, cow_dev_size, max_buckets;  	/*  	 * Calculate based on the size of the original volume or  	 * the COW volume...  	 */  	cow_dev_size = get_dev_size(s->cow->bdev); -	origin_dev_size = get_dev_size(s->origin->bdev);  	max_buckets = calc_max_buckets(); -	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; +	hash_size = cow_dev_size >> s->store->chunk_shift;  	hash_size = min(hash_size, max_buckets);  	if (hash_size < 64) @@ -764,7 +783,7 @@ static int init_hash_tables(struct dm_snapshot *s)  static void merge_shutdown(struct dm_snapshot *s)  {  	clear_bit_unlock(RUNNING_MERGE, &s->state_bits); -	smp_mb__after_clear_bit(); +	smp_mb__after_atomic();  	wake_up_bit(&s->state_bits, RUNNING_MERGE);  } @@ -1095,6 +1114,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)  	s->valid = 1;  	s->active = 0;  	atomic_set(&s->pending_exceptions_count, 0); +	s->exception_start_sequence = 0; +	s->exception_complete_sequence = 0; +	INIT_LIST_HEAD(&s->out_of_order_list);  	init_rwsem(&s->lock);  	INIT_LIST_HEAD(&s->list);  	spin_lock_init(&s->pe_lock); @@ -1383,7 +1405,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)  		goto out;  	} -	e = alloc_completed_exception(); +	e = alloc_completed_exception(GFP_NOIO);  	if (!e) {  		down_write(&s->lock);  		__invalidate_snapshot(s, -ENOMEM); @@ -1416,6 +1438,7 @@ out:  	if (full_bio) {  		full_bio->bi_end_io = pe->full_bio_end_io;  		full_bio->bi_private = pe->full_bio_private; +		atomic_inc(&full_bio->bi_remaining);  	}  	free_pending_exception(pe); @@ -1444,6 +1467,19 @@ static void commit_callback(void *context, int success)  	pending_complete(pe, success);  } +static void complete_exception(struct dm_snap_pending_exception *pe) +{ +	struct dm_snapshot *s = pe->snap; + +	if (unlikely(pe->copy_error)) +		pending_complete(pe, 0); + +	else +		/* Update the 
metadata if we are persistent */ +		s->store->type->commit_exception(s->store, &pe->e, +						 commit_callback, pe); +} +  /*   * Called when the copy I/O has finished.  kcopyd actually runs   * this code so don't block. @@ -1453,13 +1489,32 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)  	struct dm_snap_pending_exception *pe = context;  	struct dm_snapshot *s = pe->snap; -	if (read_err || write_err) -		pending_complete(pe, 0); +	pe->copy_error = read_err || write_err; -	else -		/* Update the metadata if we are persistent */ -		s->store->type->commit_exception(s->store, &pe->e, -						 commit_callback, pe); +	if (pe->exception_sequence == s->exception_complete_sequence) { +		s->exception_complete_sequence++; +		complete_exception(pe); + +		while (!list_empty(&s->out_of_order_list)) { +			pe = list_entry(s->out_of_order_list.next, +					struct dm_snap_pending_exception, out_of_order_entry); +			if (pe->exception_sequence != s->exception_complete_sequence) +				break; +			s->exception_complete_sequence++; +			list_del(&pe->out_of_order_entry); +			complete_exception(pe); +		} +	} else { +		struct list_head *lh; +		struct dm_snap_pending_exception *pe2; + +		list_for_each_prev(lh, &s->out_of_order_list) { +			pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry); +			if (pe2->exception_sequence < pe->exception_sequence) +				break; +		} +		list_add(&pe->out_of_order_entry, lh); +	}  }  /* @@ -1554,6 +1609,8 @@ __find_pending_exception(struct dm_snapshot *s,  		return NULL;  	} +	pe->exception_sequence = s->exception_start_sequence++; +  	dm_insert_exception(&s->pending, &pe->e);  	return pe; @@ -1563,11 +1620,10 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,  			    struct bio *bio, chunk_t chunk)  {  	bio->bi_bdev = s->cow->bdev; -	bio->bi_sector = chunk_to_sector(s->store, -					 dm_chunk_number(e->new_chunk) + -					 (chunk - e->old_chunk)) + -					 (bio->bi_sector & -					  s->store->chunk_mask); +	bio->bi_iter.bi_sector = +		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) + +				(chunk - e->old_chunk)) + +		(bio->bi_iter.bi_sector & s->store->chunk_mask);  }  static int snapshot_map(struct dm_target *ti, struct bio *bio) @@ -1585,7 +1641,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)  		return DM_MAPIO_REMAPPED;  	} -	chunk = sector_to_chunk(s->store, bio->bi_sector); +	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);  	/* Full snapshots are not usable */  	/* To get here the table must be live so s->active is always set. 
*/ @@ -1646,7 +1702,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)  		r = DM_MAPIO_SUBMITTED;  		if (!pe->started && -		    bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) { +		    bio->bi_iter.bi_size == +		    (s->store->chunk_size << SECTOR_SHIFT)) {  			pe->started = 1;  			up_write(&s->lock);  			start_full_bio(pe, bio); @@ -1702,7 +1759,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)  		return DM_MAPIO_REMAPPED;  	} -	chunk = sector_to_chunk(s->store, bio->bi_sector); +	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);  	down_write(&s->lock); @@ -2039,7 +2096,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)  	down_read(&_origins_lock);  	o = __lookup_origin(origin->bdev);  	if (o) -		r = __origin_write(&o->snapshots, bio->bi_sector, bio); +		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);  	up_read(&_origins_lock);  	return r; @@ -2084,6 +2141,11 @@ static int origin_write_extent(struct dm_snapshot *merging_snap,   * Origin: maps a linear range of a device, with hooks for snapshotting.   */ +struct dm_origin { +	struct dm_dev *dev; +	unsigned split_boundary; +}; +  /*   * Construct an origin mapping: <dev_path>   * The context for an origin is merely a 'struct dm_dev *' @@ -2092,41 +2154,65 @@ static int origin_write_extent(struct dm_snapshot *merging_snap,  static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)  {  	int r; -	struct dm_dev *dev; +	struct dm_origin *o;  	if (argc != 1) {  		ti->error = "origin: incorrect number of arguments";  		return -EINVAL;  	} -	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev); +	o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL); +	if (!o) { +		ti->error = "Cannot allocate private origin structure"; +		r = -ENOMEM; +		goto bad_alloc; +	} + +	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);  	if (r) {  		ti->error = "Cannot get target device"; -		return r; +		goto bad_open;  	} -	ti->private = dev; +	ti->private = o;  	ti->num_flush_bios = 1;  	return 0; + +bad_open: +	kfree(o); +bad_alloc: +	return r;  }  static void origin_dtr(struct dm_target *ti)  { -	struct dm_dev *dev = ti->private; -	dm_put_device(ti, dev); +	struct dm_origin *o = ti->private; +	dm_put_device(ti, o->dev); +	kfree(o);  }  static int origin_map(struct dm_target *ti, struct bio *bio)  { -	struct dm_dev *dev = ti->private; -	bio->bi_bdev = dev->bdev; +	struct dm_origin *o = ti->private; +	unsigned available_sectors; -	if (bio->bi_rw & REQ_FLUSH) +	bio->bi_bdev = o->dev->bdev; + +	if (unlikely(bio->bi_rw & REQ_FLUSH)) +		return DM_MAPIO_REMAPPED; + +	if (bio_rw(bio) != WRITE)  		return DM_MAPIO_REMAPPED; +	available_sectors = o->split_boundary - +		((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1)); + +	if (bio_sectors(bio) > available_sectors) +		dm_accept_partial_bio(bio, available_sectors); +  	/* Only tell snapshots if this is a write */ -	return (bio_rw(bio) == WRITE) ? 
do_origin(dev, bio) : DM_MAPIO_REMAPPED; +	return do_origin(o->dev, bio);  }  /* @@ -2135,15 +2221,15 @@ static int origin_map(struct dm_target *ti, struct bio *bio)   */  static void origin_resume(struct dm_target *ti)  { -	struct dm_dev *dev = ti->private; +	struct dm_origin *o = ti->private; -	ti->max_io_len = get_origin_minimum_chunksize(dev->bdev); +	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);  }  static void origin_status(struct dm_target *ti, status_type_t type,  			  unsigned status_flags, char *result, unsigned maxlen)  { -	struct dm_dev *dev = ti->private; +	struct dm_origin *o = ti->private;  	switch (type) {  	case STATUSTYPE_INFO: @@ -2151,7 +2237,7 @@ static void origin_status(struct dm_target *ti, status_type_t type,  		break;  	case STATUSTYPE_TABLE: -		snprintf(result, maxlen, "%s", dev->name); +		snprintf(result, maxlen, "%s", o->dev->name);  		break;  	}  } @@ -2159,13 +2245,13 @@ static void origin_status(struct dm_target *ti, status_type_t type,  static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,  			struct bio_vec *biovec, int max_size)  { -	struct dm_dev *dev = ti->private; -	struct request_queue *q = bdev_get_queue(dev->bdev); +	struct dm_origin *o = ti->private; +	struct request_queue *q = bdev_get_queue(o->dev->bdev);  	if (!q->merge_bvec_fn)  		return max_size; -	bvm->bi_bdev = dev->bdev; +	bvm->bi_bdev = o->dev->bdev;  	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));  } @@ -2173,9 +2259,9 @@ static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,  static int origin_iterate_devices(struct dm_target *ti,  				  iterate_devices_callout_fn fn, void *data)  { -	struct dm_dev *dev = ti->private; +	struct dm_origin *o = ti->private; -	return fn(ti, dev, 0, ti->len, data); +	return fn(ti, o->dev, 0, ti->len, data);  }  static struct target_type origin_target = { @@ -2193,7 +2279,7 @@ static struct target_type origin_target = {  static struct target_type snapshot_target = {  	.name    = "snapshot", -	.version = {1, 11, 1}, +	.version = {1, 12, 0},  	.module  = THIS_MODULE,  	.ctr     = snapshot_ctr,  	.dtr     = snapshot_dtr, diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 8ae31e8d3d6..28a90122a5a 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -451,19 +451,26 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,  	struct dm_stat_percpu *p;  	/* -	 * For strict correctness we should use local_irq_disable/enable +	 * For strict correctness we should use local_irq_save/restore  	 * instead of preempt_disable/enable.  	 * -	 * This is racy if the driver finishes bios from non-interrupt -	 * context as well as from interrupt context or from more different -	 * interrupts. +	 * preempt_disable/enable is racy if the driver finishes bios +	 * from non-interrupt context as well as from interrupt context +	 * or from more different interrupts.  	 * -	 * However, the race only results in not counting some events, -	 * so it is acceptable. +	 * On 64-bit architectures the race only results in not counting some +	 * events, so it is acceptable.  On 32-bit architectures the race could +	 * cause the counter going off by 2^32, so we need to do proper locking +	 * there.  	 *  	 * part_stat_lock()/part_stat_unlock() have this race too.  	 
*/ +#if BITS_PER_LONG == 32 +	unsigned long flags; +	local_irq_save(flags); +#else  	preempt_disable(); +#endif  	p = &s->stat_percpu[smp_processor_id()][entry];  	if (!end) { @@ -478,7 +485,11 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,  		p->ticks[idx] += duration;  	} +#if BITS_PER_LONG == 32 +	local_irq_restore(flags); +#else  	preempt_enable(); +#endif  }  static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw, @@ -953,6 +964,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,  int __init dm_statistics_init(void)  { +	shared_memory_amount = 0;  	dm_stat_need_rcu_barrier = 0;  	return 0;  } diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 73c1712dad9..d1600d2aa2e 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -259,13 +259,15 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,  {  	sector_t begin, end; -	stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin); +	stripe_map_range_sector(sc, bio->bi_iter.bi_sector, +				target_stripe, &begin);  	stripe_map_range_sector(sc, bio_end_sector(bio),  				target_stripe, &end);  	if (begin < end) {  		bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; -		bio->bi_sector = begin + sc->stripe[target_stripe].physical_start; -		bio->bi_size = to_bytes(end - begin); +		bio->bi_iter.bi_sector = begin + +			sc->stripe[target_stripe].physical_start; +		bio->bi_iter.bi_size = to_bytes(end - begin);  		return DM_MAPIO_REMAPPED;  	} else {  		/* The range doesn't map to the target stripe */ @@ -293,9 +295,10 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)  		return stripe_map_range(sc, bio, target_bio_nr);  	} -	stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector); +	stripe_map_sector(sc, bio->bi_iter.bi_sector, +			  &stripe, &bio->bi_iter.bi_sector); -	bio->bi_sector += sc->stripe[stripe].physical_start; +	bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;  	bio->bi_bdev = sc->stripe[stripe].dev->bdev;  	return DM_MAPIO_REMAPPED; diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c index ff9ac4be472..09a688b3d48 100644 --- a/drivers/md/dm-switch.c +++ b/drivers/md/dm-switch.c @@ -311,11 +311,11 @@ error:  static int switch_map(struct dm_target *ti, struct bio *bio)  {  	struct switch_ctx *sctx = ti->private; -	sector_t offset = dm_target_offset(ti, bio->bi_sector); +	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);  	unsigned path_nr = switch_get_path_nr(sctx, offset);  	bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; -	bio->bi_sector = sctx->path_list[path_nr].start + offset; +	bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;  	return DM_MAPIO_REMAPPED;  } diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c index 84d2b91e4ef..c62c5ab6aed 100644 --- a/drivers/md/dm-sysfs.c +++ b/drivers/md/dm-sysfs.c @@ -86,6 +86,7 @@ static const struct sysfs_ops dm_sysfs_ops = {  static struct kobj_type dm_ktype = {  	.sysfs_ops	= &dm_sysfs_ops,  	.default_attrs	= dm_attrs, +	.release	= dm_kobject_release,  };  /* @@ -104,5 +105,7 @@ int dm_sysfs_init(struct mapped_device *md)   */  void dm_sysfs_exit(struct mapped_device *md)  { -	kobject_put(dm_kobject(md)); +	struct kobject *kobj = dm_kobject(md); +	kobject_put(kobj); +	wait_for_completion(dm_get_completion_from_kobject(kobj));  } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 8f8783533ac..5f59f1e3e5b 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -155,7 +155,6 @@ 
static int alloc_targets(struct dm_table *t, unsigned int num)  {  	sector_t *n_highs;  	struct dm_target *n_targets; -	int n = t->num_targets;  	/*  	 * Allocate both the target array and offset array at once. @@ -169,12 +168,7 @@ static int alloc_targets(struct dm_table *t, unsigned int num)  	n_targets = (struct dm_target *) (n_highs + num); -	if (n) { -		memcpy(n_highs, t->highs, sizeof(*n_highs) * n); -		memcpy(n_targets, t->targets, sizeof(*n_targets) * n); -	} - -	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n)); +	memset(n_highs, -1, sizeof(*n_highs) * num);  	vfree(t->highs);  	t->num_allocated = num; @@ -200,6 +194,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode,  	num_targets = dm_round_up(num_targets, KEYS_PER_NODE); +	if (!num_targets) { +		kfree(t); +		return -ENOMEM; +	} +  	if (alloc_targets(t, num_targets)) {  		kfree(t);  		return -ENOMEM; @@ -256,17 +255,6 @@ void dm_table_destroy(struct dm_table *t)  }  /* - * Checks to see if we need to extend highs or targets. - */ -static inline int check_space(struct dm_table *t) -{ -	if (t->num_targets >= t->num_allocated) -		return alloc_targets(t, t->num_allocated * 2); - -	return 0; -} - -/*   * See if we've already got a device in the list.   */  static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev) @@ -477,8 +465,8 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,  }  EXPORT_SYMBOL(dm_get_device); -int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, -			 sector_t start, sector_t len, void *data) +static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, +				sector_t start, sector_t len, void *data)  {  	struct queue_limits *limits = data;  	struct block_device *bdev = dev->bdev; @@ -511,7 +499,6 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,  					  (unsigned int) (PAGE_SIZE >> 9));  	return 0;  } -EXPORT_SYMBOL_GPL(dm_set_device_limits);  /*   * Decrement a device's use count and remove it if necessary. @@ -545,14 +532,28 @@ static int adjoin(struct dm_table *table, struct dm_target *ti)  /*   * Used to dynamically allocate the arg array. + * + * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must + * process messages even if some device is suspended. These messages have a + * small fixed number of arguments. + * + * On the other hand, dm-switch needs to process bulk data using messages and + * excessive use of GFP_NOIO could cause trouble.   */  static char **realloc_argv(unsigned *array_size, char **old_argv)  {  	char **argv;  	unsigned new_size; +	gfp_t gfp; -	new_size = *array_size ? 
*array_size * 2 : 64; -	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL); +	if (*array_size) { +		new_size = *array_size * 2; +		gfp = GFP_KERNEL; +	} else { +		new_size = 8; +		gfp = GFP_NOIO; +	} +	argv = kmalloc(new_size * sizeof(*argv), gfp);  	if (argv) {  		memcpy(argv, old_argv, *array_size * sizeof(*argv));  		*array_size = new_size; @@ -712,8 +713,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,  		return -EINVAL;  	} -	if ((r = check_space(t))) -		return r; +	BUG_ON(t->num_targets >= t->num_allocated);  	tgt = t->targets + t->num_targets;  	memset(tgt, 0, sizeof(*tgt)); @@ -944,7 +944,7 @@ bool dm_table_request_based(struct dm_table *t)  	return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;  } -int dm_table_alloc_md_mempools(struct dm_table *t) +static int dm_table_alloc_md_mempools(struct dm_table *t)  {  	unsigned type = dm_table_get_type(t);  	unsigned per_bio_data_size = 0; @@ -1548,8 +1548,11 @@ int dm_table_resume_targets(struct dm_table *t)  			continue;  		r = ti->type->preresume(ti); -		if (r) +		if (r) { +			DMERR("%s: %s: preresume failed, error = %d", +			      dm_device_name(t->md), ti->type->name, r);  			return r; +		}  	}  	for (i = 0; i < t->num_targets; i++) { @@ -1614,6 +1617,25 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)  }  EXPORT_SYMBOL(dm_table_get_md); +void dm_table_run_md_queue_async(struct dm_table *t) +{ +	struct mapped_device *md; +	struct request_queue *queue; +	unsigned long flags; + +	if (!dm_table_request_based(t)) +		return; + +	md = dm_table_get_md(t); +	queue = dm_get_md_queue(md); +	if (queue) { +		spin_lock_irqsave(queue->queue_lock, flags); +		blk_run_queue_async(queue); +		spin_unlock_irqrestore(queue->queue_lock, flags); +	} +} +EXPORT_SYMBOL(dm_table_run_md_queue_async); +  static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,  				  sector_t start, sector_t len, void *data)  { diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 60bce435f4f..e9d33ad59df 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -76,7 +76,7 @@  #define THIN_SUPERBLOCK_MAGIC 27022010  #define THIN_SUPERBLOCK_LOCATION 0 -#define THIN_VERSION 1 +#define THIN_VERSION 2  #define THIN_METADATA_CACHE_SIZE 64  #define SECTOR_TO_BLOCK_SHIFT 3 @@ -192,6 +192,13 @@ struct dm_pool_metadata {  	 * operation possible in this state is the closing of the device.  	 */  	bool fail_io:1; + +	/* +	 * Reading the space map roots can fail, so we read it into these +	 * buffers before the superblock is locked and updated. 
+	 */ +	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE]; +	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];  };  struct dm_thin_device { @@ -431,26 +438,53 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)  	pmd->details_info.value_type.equal = NULL;  } +static int save_sm_roots(struct dm_pool_metadata *pmd) +{ +	int r; +	size_t len; + +	r = dm_sm_root_size(pmd->metadata_sm, &len); +	if (r < 0) +		return r; + +	r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len); +	if (r < 0) +		return r; + +	r = dm_sm_root_size(pmd->data_sm, &len); +	if (r < 0) +		return r; + +	return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len); +} + +static void copy_sm_roots(struct dm_pool_metadata *pmd, +			  struct thin_disk_superblock *disk) +{ +	memcpy(&disk->metadata_space_map_root, +	       &pmd->metadata_space_map_root, +	       sizeof(pmd->metadata_space_map_root)); + +	memcpy(&disk->data_space_map_root, +	       &pmd->data_space_map_root, +	       sizeof(pmd->data_space_map_root)); +} +  static int __write_initial_superblock(struct dm_pool_metadata *pmd)  {  	int r;  	struct dm_block *sblock; -	size_t metadata_len, data_len;  	struct thin_disk_superblock *disk_super;  	sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;  	if (bdev_size > THIN_METADATA_MAX_SECTORS)  		bdev_size = THIN_METADATA_MAX_SECTORS; -	r = dm_sm_root_size(pmd->metadata_sm, &metadata_len); -	if (r < 0) -		return r; - -	r = dm_sm_root_size(pmd->data_sm, &data_len); +	r = dm_sm_commit(pmd->data_sm);  	if (r < 0)  		return r; -	r = dm_sm_commit(pmd->data_sm); +	r = save_sm_roots(pmd);  	if (r < 0)  		return r; @@ -471,27 +505,15 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)  	disk_super->trans_id = 0;  	disk_super->held_root = 0; -	r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root, -			    metadata_len); -	if (r < 0) -		goto bad_locked; - -	r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root, -			    data_len); -	if (r < 0) -		goto bad_locked; +	copy_sm_roots(pmd, disk_super);  	disk_super->data_mapping_root = cpu_to_le64(pmd->root);  	disk_super->device_details_root = cpu_to_le64(pmd->details_root); -	disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); +	disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);  	disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);  	disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);  	return dm_tm_commit(pmd->tm, sblock); - -bad_locked: -	dm_bm_unlock(sblock); -	return r;  }  static int __format_metadata(struct dm_pool_metadata *pmd) @@ -591,6 +613,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)  	disk_super = dm_block_data(sblock); +	/* Verify the data block size hasn't changed */ +	if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) { +		DMERR("changing the data block size (from %u to %llu) is not supported", +		      le32_to_cpu(disk_super->data_block_size), +		      (unsigned long long)pmd->data_block_size); +		r = -EINVAL; +		goto bad_unlock_sblock; +	} +  	r = __check_incompat_features(disk_super, pmd);  	if (r < 0)  		goto bad_unlock_sblock; @@ -651,7 +682,7 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f  {  	int r; -	pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE, +	pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,  					  
THIN_METADATA_CACHE_SIZE,  					  THIN_MAX_CONCURRENT_LOCKS);  	if (IS_ERR(pmd->bm)) { @@ -769,6 +800,10 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)  	if (r < 0)  		return r; +	r = save_sm_roots(pmd); +	if (r < 0) +		return r; +  	r = superblock_lock(pmd, &sblock);  	if (r)  		return r; @@ -780,21 +815,9 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)  	disk_super->trans_id = cpu_to_le64(pmd->trans_id);  	disk_super->flags = cpu_to_le32(pmd->flags); -	r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root, -			    metadata_len); -	if (r < 0) -		goto out_locked; - -	r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root, -			    data_len); -	if (r < 0) -		goto out_locked; +	copy_sm_roots(pmd, disk_super);  	return dm_tm_commit(pmd->tm, sblock); - -out_locked: -	dm_bm_unlock(sblock); -	return r;  }  struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, @@ -1349,6 +1372,12 @@ dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)  	return td->id;  } +/* + * Check whether @time (of block creation) is older than @td's last snapshot. + * If so then the associated block is shared with the last snapshot device. + * Any block on a device created *after* the device last got snapshotted is + * necessarily not shared. + */  static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)  {  	return td->snapshotted_time > time; @@ -1458,6 +1487,20 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)  	return r;  } +int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) +{ +	int r; +	uint32_t ref_count; + +	down_read(&pmd->root_lock); +	r = dm_sm_get_count(pmd->data_sm, b, &ref_count); +	if (!r) +		*result = (ref_count != 0); +	up_read(&pmd->root_lock); + +	return r; +} +  bool dm_thin_changed_this_transaction(struct dm_thin_device *td)  {  	int r; @@ -1469,6 +1512,23 @@ bool dm_thin_changed_this_transaction(struct dm_thin_device *td)  	return r;  } +bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd) +{ +	bool r = false; +	struct dm_thin_device *td, *tmp; + +	down_read(&pmd->root_lock); +	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) { +		if (td->changed) { +			r = td->changed; +			break; +		} +	} +	up_read(&pmd->root_lock); + +	return r; +} +  bool dm_thin_aborted_changes(struct dm_thin_device *td)  {  	bool r; @@ -1697,6 +1757,14 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)  	up_write(&pmd->root_lock);  } +void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd) +{ +	down_write(&pmd->root_lock); +	pmd->read_only = false; +	dm_bm_set_read_write(pmd->bm); +	up_write(&pmd->root_lock); +} +  int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,  					dm_block_t threshold,  					dm_sm_threshold_fn fn, @@ -1710,3 +1778,38 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,  	return r;  } + +int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd) +{ +	int r; +	struct dm_block *sblock; +	struct thin_disk_superblock *disk_super; + +	down_write(&pmd->root_lock); +	pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG; + +	r = superblock_lock(pmd, &sblock); +	if (r) { +		DMERR("couldn't read superblock"); +		goto out; +	} + +	disk_super = dm_block_data(sblock); +	disk_super->flags = cpu_to_le32(pmd->flags); + +	dm_bm_unlock(sblock); +out: +	up_write(&pmd->root_lock); +	return r; +} + +bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd) +{ +	bool needs_check; + +	
down_read(&pmd->root_lock); +	needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG; +	up_read(&pmd->root_lock); + +	return needs_check; +} diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index 845ebbe589a..e3c857db195 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h @@ -9,16 +9,14 @@  #include "persistent-data/dm-block-manager.h"  #include "persistent-data/dm-space-map.h" +#include "persistent-data/dm-space-map-metadata.h" -#define THIN_METADATA_BLOCK_SIZE 4096 +#define THIN_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE  /*   * The metadata device is currently limited in size. - * - * We have one block of index, which can hold 255 index entries.  Each - * index entry contains allocation info about 16k metadata blocks.   */ -#define THIN_METADATA_MAX_SECTORS (255 * (1 << 14) * (THIN_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT))) +#define THIN_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS  /*   * A metadata device larger than 16GB triggers a warning. @@ -27,6 +25,11 @@  /*----------------------------------------------------------------*/ +/* + * Thin metadata superblock flags. + */ +#define THIN_METADATA_NEEDS_CHECK_FLAG (1 << 0) +  struct dm_pool_metadata;  struct dm_thin_device; @@ -131,7 +134,7 @@ dm_thin_id dm_thin_dev_id(struct dm_thin_device *td);  struct dm_thin_lookup_result {  	dm_block_t block; -	unsigned shared:1; +	bool shared:1;  };  /* @@ -161,6 +164,8 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);   */  bool dm_thin_changed_this_transaction(struct dm_thin_device *td); +bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd); +  bool dm_thin_aborted_changes(struct dm_thin_device *td);  int dm_thin_get_highest_mapped_block(struct dm_thin_device *td, @@ -181,6 +186,8 @@ int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result);  int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); +int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result); +  /*   * Returns -ENOSPC if the new size is too small and already allocated   * blocks would be lost. @@ -193,12 +200,19 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_siz   * that nothing is changing.   */  void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd); +void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd);  int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,  					dm_block_t threshold,  					dm_sm_threshold_fn fn,  					void *context); +/* + * Updates the superblock immediately. 
+ */ +int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd); +bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd); +  /*----------------------------------------------------------------*/  #endif diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index ed063427d67..fc9c848a60c 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -12,9 +12,11 @@  #include <linux/dm-io.h>  #include <linux/dm-kcopyd.h>  #include <linux/list.h> +#include <linux/rculist.h>  #include <linux/init.h>  #include <linux/module.h>  #include <linux/slab.h> +#include <linux/rbtree.h>  #define	DM_MSG_PREFIX	"thin" @@ -25,6 +27,9 @@  #define MAPPING_POOL_SIZE 1024  #define PRISON_CELLS 1024  #define COMMIT_PERIOD HZ +#define NO_SPACE_TIMEOUT_SECS 60 + +static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;  DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,  		"A percentage of time allocated for copy on write"); @@ -130,10 +135,11 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,  struct dm_thin_new_mapping;  /* - * The pool runs in 3 modes.  Ordered in degraded order for comparisons. + * The pool runs in 4 modes.  Ordered in degraded order for comparisons.   */  enum pool_mode {  	PM_WRITE,		/* metadata may be changed */ +	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */  	PM_READ_ONLY,		/* metadata may not be changed */  	PM_FAIL,		/* all I/O fails */  }; @@ -144,6 +150,7 @@ struct pool_features {  	bool zero_new_blocks:1;  	bool discard_enabled:1;  	bool discard_passdown:1; +	bool error_if_no_space:1;  };  struct thin_c; @@ -163,8 +170,7 @@ struct pool {  	int sectors_per_block_shift;  	struct pool_features pf; -	unsigned low_water_triggered:1;	/* A dm event has been sent */ -	unsigned no_free_space:1;	/* A -ENOSPC warning has been issued */ +	bool low_water_triggered:1;	/* A dm event has been sent */  	struct dm_bio_prison *prison;  	struct dm_kcopyd_client *copier; @@ -172,17 +178,16 @@ struct pool {  	struct workqueue_struct *wq;  	struct work_struct worker;  	struct delayed_work waker; +	struct delayed_work no_space_timeout;  	unsigned long last_commit_jiffies;  	unsigned ref_count;  	spinlock_t lock; -	struct bio_list deferred_bios;  	struct bio_list deferred_flush_bios;  	struct list_head prepared_mappings;  	struct list_head prepared_discards; - -	struct bio_list retry_on_resume_list; +	struct list_head active_thins;  	struct dm_deferred_set *shared_read_ds;  	struct dm_deferred_set *all_io_ds; @@ -198,7 +203,7 @@ struct pool {  };  static enum pool_mode get_pool_mode(struct pool *pool); -static void set_pool_mode(struct pool *pool, enum pool_mode mode); +static void metadata_operation_failed(struct pool *pool, const char *op, int r);  /*   * Target context for a pool. @@ -219,12 +224,25 @@ struct pool_c {   * Target context for a thin.   */  struct thin_c { +	struct list_head list;  	struct dm_dev *pool_dev;  	struct dm_dev *origin_dev;  	dm_thin_id dev_id;  	struct pool *pool;  	struct dm_thin_device *td; +	bool requeue_mode:1; +	spinlock_t lock; +	struct bio_list deferred_bio_list; +	struct bio_list retry_on_resume_list; +	struct rb_root sort_bio_list; /* sorted list of deferred bios */ + +	/* +	 * Ensures the thin is not destroyed until the worker has finished +	 * iterating the active_thins list. 
+	 */ +	atomic_t refcount; +	struct completion can_destroy;  };  /*----------------------------------------------------------------*/ @@ -285,20 +303,25 @@ static void cell_defer_no_holder_no_free(struct thin_c *tc,  	struct pool *pool = tc->pool;  	unsigned long flags; -	spin_lock_irqsave(&pool->lock, flags); -	dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios); -	spin_unlock_irqrestore(&pool->lock, flags); +	spin_lock_irqsave(&tc->lock, flags); +	dm_cell_release_no_holder(pool->prison, cell, &tc->deferred_bio_list); +	spin_unlock_irqrestore(&tc->lock, flags);  	wake_worker(pool);  } -static void cell_error(struct pool *pool, -		       struct dm_bio_prison_cell *cell) +static void cell_error_with_code(struct pool *pool, +				 struct dm_bio_prison_cell *cell, int error_code)  { -	dm_cell_error(pool->prison, cell); +	dm_cell_error(pool->prison, cell, error_code);  	dm_bio_prison_free_cell(pool->prison, cell);  } +static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell) +{ +	cell_error_with_code(pool, cell, -EIO); +} +  /*----------------------------------------------------------------*/  /* @@ -366,36 +389,57 @@ struct dm_thin_endio_hook {  	struct dm_deferred_entry *shared_read_entry;  	struct dm_deferred_entry *all_io_entry;  	struct dm_thin_new_mapping *overwrite_mapping; +	struct rb_node rb_node;  }; -static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) +static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)  {  	struct bio *bio;  	struct bio_list bios; +	unsigned long flags;  	bio_list_init(&bios); + +	spin_lock_irqsave(&tc->lock, flags);  	bio_list_merge(&bios, master);  	bio_list_init(master); +	spin_unlock_irqrestore(&tc->lock, flags); -	while ((bio = bio_list_pop(&bios))) { -		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); - -		if (h->tc == tc) -			bio_endio(bio, DM_ENDIO_REQUEUE); -		else -			bio_list_add(master, bio); -	} +	while ((bio = bio_list_pop(&bios))) +		bio_endio(bio, DM_ENDIO_REQUEUE);  }  static void requeue_io(struct thin_c *tc)  { -	struct pool *pool = tc->pool; +	requeue_bio_list(tc, &tc->deferred_bio_list); +	requeue_bio_list(tc, &tc->retry_on_resume_list); +} + +static void error_thin_retry_list(struct thin_c *tc) +{ +	struct bio *bio;  	unsigned long flags; +	struct bio_list bios; -	spin_lock_irqsave(&pool->lock, flags); -	__requeue_bio_list(tc, &pool->deferred_bios); -	__requeue_bio_list(tc, &pool->retry_on_resume_list); -	spin_unlock_irqrestore(&pool->lock, flags); +	bio_list_init(&bios); + +	spin_lock_irqsave(&tc->lock, flags); +	bio_list_merge(&bios, &tc->retry_on_resume_list); +	bio_list_init(&tc->retry_on_resume_list); +	spin_unlock_irqrestore(&tc->lock, flags); + +	while ((bio = bio_list_pop(&bios))) +		bio_io_error(bio); +} + +static void error_retry_list(struct pool *pool) +{ +	struct thin_c *tc; + +	rcu_read_lock(); +	list_for_each_entry_rcu(tc, &pool->active_thins, list) +		error_thin_retry_list(tc); +	rcu_read_unlock();  }  /* @@ -413,7 +457,7 @@ static bool block_size_is_power_of_two(struct pool *pool)  static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)  {  	struct pool *pool = tc->pool; -	sector_t block_nr = bio->bi_sector; +	sector_t block_nr = bio->bi_iter.bi_sector;  	if (block_size_is_power_of_two(pool))  		block_nr >>= pool->sectors_per_block_shift; @@ -426,14 +470,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)  static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)  {  	
struct pool *pool = tc->pool; -	sector_t bi_sector = bio->bi_sector; +	sector_t bi_sector = bio->bi_iter.bi_sector;  	bio->bi_bdev = tc->pool_dev->bdev;  	if (block_size_is_power_of_two(pool)) -		bio->bi_sector = (block << pool->sectors_per_block_shift) | -				(bi_sector & (pool->sectors_per_block - 1)); +		bio->bi_iter.bi_sector = +			(block << pool->sectors_per_block_shift) | +			(bi_sector & (pool->sectors_per_block - 1));  	else -		bio->bi_sector = (block * pool->sectors_per_block) + +		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +  				 sector_div(bi_sector, pool->sectors_per_block);  } @@ -509,15 +554,16 @@ static void remap_and_issue(struct thin_c *tc, struct bio *bio,  struct dm_thin_new_mapping {  	struct list_head list; -	unsigned quiesced:1; -	unsigned prepared:1; -	unsigned pass_discard:1; +	bool quiesced:1; +	bool prepared:1; +	bool pass_discard:1; +	bool definitely_not_shared:1; +	int err;  	struct thin_c *tc;  	dm_block_t virt_block;  	dm_block_t data_block;  	struct dm_bio_prison_cell *cell, *cell2; -	int err;  	/*  	 * If the bio covers the whole area of a block then we can avoid @@ -534,7 +580,7 @@ static void __maybe_add_mapping(struct dm_thin_new_mapping *m)  	struct pool *pool = m->tc->pool;  	if (m->quiesced && m->prepared) { -		list_add(&m->list, &pool->prepared_mappings); +		list_add_tail(&m->list, &pool->prepared_mappings);  		wake_worker(pool);  	}  } @@ -548,7 +594,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)  	m->err = read_err || write_err ? -EIO : 0;  	spin_lock_irqsave(&pool->lock, flags); -	m->prepared = 1; +	m->prepared = true;  	__maybe_add_mapping(m);  	spin_unlock_irqrestore(&pool->lock, flags);  } @@ -563,7 +609,7 @@ static void overwrite_endio(struct bio *bio, int err)  	m->err = err;  	spin_lock_irqsave(&pool->lock, flags); -	m->prepared = 1; +	m->prepared = true;  	__maybe_add_mapping(m);  	spin_unlock_irqrestore(&pool->lock, flags);  } @@ -586,9 +632,9 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)  	struct pool *pool = tc->pool;  	unsigned long flags; -	spin_lock_irqsave(&pool->lock, flags); -	cell_release(pool, cell, &pool->deferred_bios); -	spin_unlock_irqrestore(&tc->pool->lock, flags); +	spin_lock_irqsave(&tc->lock, flags); +	cell_release(pool, cell, &tc->deferred_bio_list); +	spin_unlock_irqrestore(&tc->lock, flags);  	wake_worker(pool);  } @@ -601,17 +647,19 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c  	struct pool *pool = tc->pool;  	unsigned long flags; -	spin_lock_irqsave(&pool->lock, flags); -	cell_release_no_holder(pool, cell, &pool->deferred_bios); -	spin_unlock_irqrestore(&pool->lock, flags); +	spin_lock_irqsave(&tc->lock, flags); +	cell_release_no_holder(pool, cell, &tc->deferred_bio_list); +	spin_unlock_irqrestore(&tc->lock, flags);  	wake_worker(pool);  }  static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)  { -	if (m->bio) +	if (m->bio) {  		m->bio->bi_end_io = m->saved_bi_end_io; +		atomic_inc(&m->bio->bi_remaining); +	}  	cell_error(m->tc->pool, m->cell);  	list_del(&m->list);  	mempool_free(m, m->tc->pool->mapping_pool); @@ -625,8 +673,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)  	int r;  	bio = m->bio; -	if (bio) +	if (bio) {  		bio->bi_end_io = m->saved_bi_end_io; +		atomic_inc(&bio->bi_remaining); +	}  	if (m->err) {  		cell_error(pool, m->cell); @@ -640,7 +690,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)  	 */  	r = 
dm_thin_insert_block(tc->td, m->virt_block, m->data_block);  	if (r) { -		DMERR_LIMIT("dm_thin_insert_block() failed"); +		metadata_operation_failed(pool, "dm_thin_insert_block", r);  		cell_error(pool, m->cell);  		goto out;  	} @@ -681,7 +731,15 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)  	cell_defer_no_holder(tc, m->cell2);  	if (m->pass_discard) -		remap_and_issue(tc, m->bio, m->data_block); +		if (m->definitely_not_shared) +			remap_and_issue(tc, m->bio, m->data_block); +		else { +			bool used = false; +			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used) +				bio_endio(m->bio, 0); +			else +				remap_and_issue(tc, m->bio, m->data_block); +		}  	else  		bio_endio(m->bio, 0); @@ -721,7 +779,8 @@ static void process_prepared(struct pool *pool, struct list_head *head,   */  static int io_overlaps_block(struct pool *pool, struct bio *bio)  { -	return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT); +	return bio->bi_iter.bi_size == +		(pool->sectors_per_block << SECTOR_SHIFT);  }  static int io_overwrites_block(struct pool *pool, struct bio *bio) @@ -749,13 +808,17 @@ static int ensure_next_mapping(struct pool *pool)  static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)  { -	struct dm_thin_new_mapping *r = pool->next_mapping; +	struct dm_thin_new_mapping *m = pool->next_mapping;  	BUG_ON(!pool->next_mapping); +	memset(m, 0, sizeof(struct dm_thin_new_mapping)); +	INIT_LIST_HEAD(&m->list); +	m->bio = NULL; +  	pool->next_mapping = NULL; -	return r; +	return m;  }  static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, @@ -767,18 +830,13 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,  	struct pool *pool = tc->pool;  	struct dm_thin_new_mapping *m = get_next_mapping(pool); -	INIT_LIST_HEAD(&m->list); -	m->quiesced = 0; -	m->prepared = 0;  	m->tc = tc;  	m->virt_block = virt_block;  	m->data_block = data_dest;  	m->cell = cell; -	m->err = 0; -	m->bio = NULL;  	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list)) -		m->quiesced = 1; +		m->quiesced = true;  	/*  	 * IO to pool_dev remaps to the pool target's data_dev. @@ -838,15 +896,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,  	struct pool *pool = tc->pool;  	struct dm_thin_new_mapping *m = get_next_mapping(pool); -	INIT_LIST_HEAD(&m->list); -	m->quiesced = 1; -	m->prepared = 0; +	m->quiesced = true; +	m->prepared = false;  	m->tc = tc;  	m->virt_block = virt_block;  	m->data_block = data_block;  	m->cell = cell; -	m->err = 0; -	m->bio = NULL;  	/*  	 * If the whole block of data is being overwritten or we are not @@ -881,94 +936,83 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,  	}  } -static int commit(struct pool *pool) -{ -	int r; - -	r = dm_pool_commit_metadata(pool->pmd); -	if (r) -		DMERR_LIMIT("%s: commit failed: error = %d", -			    dm_device_name(pool->pool_md), r); - -	return r; -} -  /*   * A non-zero return indicates read_only or fail_io mode.   * Many callers don't care about the return value.   
*/ -static int commit_or_fallback(struct pool *pool) +static int commit(struct pool *pool)  {  	int r; -	if (get_pool_mode(pool) != PM_WRITE) +	if (get_pool_mode(pool) >= PM_READ_ONLY)  		return -EINVAL; -	r = commit(pool); +	r = dm_pool_commit_metadata(pool->pmd);  	if (r) -		set_pool_mode(pool, PM_READ_ONLY); +		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);  	return r;  } -static int alloc_data_block(struct thin_c *tc, dm_block_t *result) +static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)  { -	int r; -	dm_block_t free_blocks;  	unsigned long flags; -	struct pool *pool = tc->pool; - -	/* -	 * Once no_free_space is set we must not allow allocation to succeed. -	 * Otherwise it is difficult to explain, debug, test and support. -	 */ -	if (pool->no_free_space) -		return -ENOSPC; - -	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); -	if (r) -		return r;  	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {  		DMWARN("%s: reached low water mark for data device: sending event.",  		       dm_device_name(pool->pool_md));  		spin_lock_irqsave(&pool->lock, flags); -		pool->low_water_triggered = 1; +		pool->low_water_triggered = true;  		spin_unlock_irqrestore(&pool->lock, flags);  		dm_table_event(pool->ti->table);  	} +} + +static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); + +static int alloc_data_block(struct thin_c *tc, dm_block_t *result) +{ +	int r; +	dm_block_t free_blocks; +	struct pool *pool = tc->pool; + +	if (WARN_ON(get_pool_mode(pool) != PM_WRITE)) +		return -EINVAL; + +	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); +	if (r) { +		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r); +		return r; +	} + +	check_low_water_mark(pool, free_blocks);  	if (!free_blocks) {  		/*  		 * Try to commit to see if that will free up some  		 * more space.  		 */ -		(void) commit_or_fallback(pool); +		r = commit(pool); +		if (r) +			return r;  		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); -		if (r) +		if (r) { +			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);  			return r; +		} -		/* -		 * If we still have no space we set a flag to avoid -		 * doing all this checking and return -ENOSPC.  This -		 * flag serves as a latch that disallows allocations from -		 * this pool until the admin takes action (e.g. resize or -		 * table reload). 
-		 */  		if (!free_blocks) { -			DMWARN("%s: no free space available.", -			       dm_device_name(pool->pool_md)); -			spin_lock_irqsave(&pool->lock, flags); -			pool->no_free_space = 1; -			spin_unlock_irqrestore(&pool->lock, flags); +			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);  			return -ENOSPC;  		}  	}  	r = dm_pool_alloc_data_block(pool->pmd, result); -	if (r) +	if (r) { +		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);  		return r; +	}  	return 0;  } @@ -981,24 +1025,68 @@ static void retry_on_resume(struct bio *bio)  {  	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));  	struct thin_c *tc = h->tc; -	struct pool *pool = tc->pool;  	unsigned long flags; -	spin_lock_irqsave(&pool->lock, flags); -	bio_list_add(&pool->retry_on_resume_list, bio); -	spin_unlock_irqrestore(&pool->lock, flags); +	spin_lock_irqsave(&tc->lock, flags); +	bio_list_add(&tc->retry_on_resume_list, bio); +	spin_unlock_irqrestore(&tc->lock, flags); +} + +static int should_error_unserviceable_bio(struct pool *pool) +{ +	enum pool_mode m = get_pool_mode(pool); + +	switch (m) { +	case PM_WRITE: +		/* Shouldn't get here */ +		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode"); +		return -EIO; + +	case PM_OUT_OF_DATA_SPACE: +		return pool->pf.error_if_no_space ? -ENOSPC : 0; + +	case PM_READ_ONLY: +	case PM_FAIL: +		return -EIO; +	default: +		/* Shouldn't get here */ +		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode"); +		return -EIO; +	} +} + +static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) +{ +	int error = should_error_unserviceable_bio(pool); + +	if (error) +		bio_endio(bio, error); +	else +		retry_on_resume(bio);  } -static void no_space(struct pool *pool, struct dm_bio_prison_cell *cell) +static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)  {  	struct bio *bio;  	struct bio_list bios; +	int error; + +	error = should_error_unserviceable_bio(pool); +	if (error) { +		cell_error_with_code(pool, cell, error); +		return; +	}  	bio_list_init(&bios);  	cell_release(pool, cell, &bios); -	while ((bio = bio_list_pop(&bios))) -		retry_on_resume(bio); +	error = should_error_unserviceable_bio(pool); +	if (error) +		while ((bio = bio_list_pop(&bios))) +			bio_endio(bio, error); +	else +		while ((bio = bio_list_pop(&bios))) +			retry_on_resume(bio);  }  static void process_discard(struct thin_c *tc, struct bio *bio) @@ -1037,17 +1125,17 @@ static void process_discard(struct thin_c *tc, struct bio *bio)  			 */  			m = get_next_mapping(pool);  			m->tc = tc; -			m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown; +			m->pass_discard = pool->pf.discard_passdown; +			m->definitely_not_shared = !lookup_result.shared;  			m->virt_block = block;  			m->data_block = lookup_result.block;  			m->cell = cell;  			m->cell2 = cell2; -			m->err = 0;  			m->bio = bio;  			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {  				spin_lock_irqsave(&pool->lock, flags); -				list_add(&m->list, &pool->prepared_discards); +				list_add_tail(&m->list, &pool->prepared_discards);  				spin_unlock_irqrestore(&pool->lock, flags);  				wake_worker(pool);  			} @@ -1102,13 +1190,12 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,  		break;  	case -ENOSPC: -		no_space(pool, cell); +		retry_bios_on_resume(pool, cell);  		break;  	default:  		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",  			    __func__, r); -		set_pool_mode(pool, PM_READ_ONLY);  		
cell_error(pool, cell);  		break;  	} @@ -1130,7 +1217,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,  	if (bio_detain(pool, &key, bio, &cell))  		return; -	if (bio_data_dir(bio) == WRITE && bio->bi_size) +	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)  		break_sharing(tc, bio, block, &key, lookup_result, cell);  	else {  		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); @@ -1153,7 +1240,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block  	/*  	 * Remap empty bios (flushes) immediately, without provisioning.  	 */ -	if (!bio->bi_size) { +	if (!bio->bi_iter.bi_size) {  		inc_all_io_entry(pool, bio);  		cell_defer_no_holder(tc, cell); @@ -1181,13 +1268,12 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block  		break;  	case -ENOSPC: -		no_space(pool, cell); +		retry_bios_on_resume(pool, cell);  		break;  	default:  		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",  			    __func__, r); -		set_pool_mode(pool, PM_READ_ONLY);  		cell_error(pool, cell);  		break;  	} @@ -1253,8 +1339,8 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)  	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);  	switch (r) {  	case 0: -		if (lookup_result.shared && (rw == WRITE) && bio->bi_size) -			bio_io_error(bio); +		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) +			handle_unserviceable_bio(tc->pool, bio);  		else {  			inc_all_io_entry(tc->pool, bio);  			remap_and_issue(tc, bio, lookup_result.block); @@ -1263,7 +1349,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)  	case -ENODATA:  		if (rw != READ) { -			bio_io_error(bio); +			handle_unserviceable_bio(tc->pool, bio);  			break;  		} @@ -1285,6 +1371,11 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)  	}  } +static void process_bio_success(struct thin_c *tc, struct bio *bio) +{ +	bio_endio(bio, 0); +} +  static void process_bio_fail(struct thin_c *tc, struct bio *bio)  {  	bio_io_error(bio); @@ -1300,33 +1391,111 @@ static int need_commit_due_to_time(struct pool *pool)  	       jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;  } -static void process_deferred_bios(struct pool *pool) +#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node) +#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook)) + +static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio) +{ +	struct rb_node **rbp, *parent; +	struct dm_thin_endio_hook *pbd; +	sector_t bi_sector = bio->bi_iter.bi_sector; + +	rbp = &tc->sort_bio_list.rb_node; +	parent = NULL; +	while (*rbp) { +		parent = *rbp; +		pbd = thin_pbd(parent); + +		if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector) +			rbp = &(*rbp)->rb_left; +		else +			rbp = &(*rbp)->rb_right; +	} + +	pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); +	rb_link_node(&pbd->rb_node, parent, rbp); +	rb_insert_color(&pbd->rb_node, &tc->sort_bio_list); +} + +static void __extract_sorted_bios(struct thin_c *tc) +{ +	struct rb_node *node; +	struct dm_thin_endio_hook *pbd; +	struct bio *bio; + +	for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) { +		pbd = thin_pbd(node); +		bio = thin_bio(pbd); + +		bio_list_add(&tc->deferred_bio_list, bio); +		rb_erase(&pbd->rb_node, &tc->sort_bio_list); +	} + +	WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list)); +} + +static void __sort_thin_deferred_bios(struct thin_c *tc)  { +	struct bio 
*bio; +	struct bio_list bios; + +	bio_list_init(&bios); +	bio_list_merge(&bios, &tc->deferred_bio_list); +	bio_list_init(&tc->deferred_bio_list); + +	/* Sort deferred_bio_list using rb-tree */ +	while ((bio = bio_list_pop(&bios))) +		__thin_bio_rb_add(tc, bio); + +	/* +	 * Transfer the sorted bios in sort_bio_list back to +	 * deferred_bio_list to allow lockless submission of +	 * all bios. +	 */ +	__extract_sorted_bios(tc); +} + +static void process_thin_deferred_bios(struct thin_c *tc) +{ +	struct pool *pool = tc->pool;  	unsigned long flags;  	struct bio *bio;  	struct bio_list bios; +	struct blk_plug plug; + +	if (tc->requeue_mode) { +		requeue_bio_list(tc, &tc->deferred_bio_list); +		return; +	}  	bio_list_init(&bios); -	spin_lock_irqsave(&pool->lock, flags); -	bio_list_merge(&bios, &pool->deferred_bios); -	bio_list_init(&pool->deferred_bios); -	spin_unlock_irqrestore(&pool->lock, flags); +	spin_lock_irqsave(&tc->lock, flags); -	while ((bio = bio_list_pop(&bios))) { -		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); -		struct thin_c *tc = h->tc; +	if (bio_list_empty(&tc->deferred_bio_list)) { +		spin_unlock_irqrestore(&tc->lock, flags); +		return; +	} + +	__sort_thin_deferred_bios(tc); + +	bio_list_merge(&bios, &tc->deferred_bio_list); +	bio_list_init(&tc->deferred_bio_list); + +	spin_unlock_irqrestore(&tc->lock, flags); +	blk_start_plug(&plug); +	while ((bio = bio_list_pop(&bios))) {  		/*  		 * If we've got no free new_mapping structs, and processing  		 * this bio might require one, we pause until there are some  		 * prepared mappings to process.  		 */  		if (ensure_next_mapping(pool)) { -			spin_lock_irqsave(&pool->lock, flags); -			bio_list_merge(&pool->deferred_bios, &bios); -			spin_unlock_irqrestore(&pool->lock, flags); - +			spin_lock_irqsave(&tc->lock, flags); +			bio_list_add(&tc->deferred_bio_list, bio); +			bio_list_merge(&tc->deferred_bio_list, &bios); +			spin_unlock_irqrestore(&tc->lock, flags);  			break;  		} @@ -1335,6 +1504,60 @@ static void process_deferred_bios(struct pool *pool)  		else  			pool->process_bio(tc, bio);  	} +	blk_finish_plug(&plug); +} + +static void thin_get(struct thin_c *tc); +static void thin_put(struct thin_c *tc); + +/* + * We can't hold rcu_read_lock() around code that can block.  So we + * find a thin with the rcu lock held; bump a refcount; then drop + * the lock. 
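[editor's note] The per-thin deferred list is now sorted by start sector before submission, so the pool issues I/O in roughly ascending order. The patch does this with an intrusive rb-tree keyed on bi_iter.bi_sector; below is a userspace sketch of the same idea using qsort over a plain array (an analogue only, not the kernel implementation):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a deferred bio: only the start sector matters for ordering. */
struct deferred_io {
	unsigned long long sector;
	void *payload;
};

static int cmp_by_sector(const void *a, const void *b)
{
	const struct deferred_io *x = a, *y = b;

	if (x->sector < y->sector)
		return -1;
	return x->sector > y->sector;
}

int main(void)
{
	struct deferred_io q[] = {
		{ 4096, NULL }, { 128, NULL }, { 262144, NULL }, { 8, NULL },
	};
	size_t i, n = sizeof(q) / sizeof(q[0]);

	/* Sort once, then submit in ascending sector order. */
	qsort(q, n, sizeof(q[0]), cmp_by_sector);
	for (i = 0; i < n; i++)
		printf("submit sector %llu\n", q[i].sector);
	return 0;
}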
+ */ +static struct thin_c *get_first_thin(struct pool *pool) +{ +	struct thin_c *tc = NULL; + +	rcu_read_lock(); +	if (!list_empty(&pool->active_thins)) { +		tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); +		thin_get(tc); +	} +	rcu_read_unlock(); + +	return tc; +} + +static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc) +{ +	struct thin_c *old_tc = tc; + +	rcu_read_lock(); +	list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) { +		thin_get(tc); +		thin_put(old_tc); +		rcu_read_unlock(); +		return tc; +	} +	thin_put(old_tc); +	rcu_read_unlock(); + +	return NULL; +} + +static void process_deferred_bios(struct pool *pool) +{ +	unsigned long flags; +	struct bio *bio; +	struct bio_list bios; +	struct thin_c *tc; + +	tc = get_first_thin(pool); +	while (tc) { +		process_thin_deferred_bios(tc); +		tc = get_next_thin(pool, tc); +	}  	/*  	 * If there are any deferred flush bios, we must commit @@ -1346,10 +1569,11 @@ static void process_deferred_bios(struct pool *pool)  	bio_list_init(&pool->deferred_flush_bios);  	spin_unlock_irqrestore(&pool->lock, flags); -	if (bio_list_empty(&bios) && !need_commit_due_to_time(pool)) +	if (bio_list_empty(&bios) && +	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))  		return; -	if (commit_or_fallback(pool)) { +	if (commit(pool)) {  		while ((bio = bio_list_pop(&bios)))  			bio_io_error(bio);  		return; @@ -1380,6 +1604,81 @@ static void do_waker(struct work_struct *ws)  	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);  } +/* + * We're holding onto IO to allow userland time to react.  After the + * timeout either the pool will have been resized (and thus back in + * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO. 
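[editor's note] get_first_thin()/get_next_thin() above follow a common shape: hold the lock (here the RCU read lock) only long enough to pick the next element and bump its reference count, then drop the lock before doing work that can block. A userspace sketch of that shape using a mutex and C11 atomics instead of RCU (an analogue with assumed names; it compiles as a translation unit but is only a sketch):

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct thin {
	atomic_int refcount;
	struct thin *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct thin *active_thins;	/* singly linked for brevity */

static void thin_get(struct thin *tc) { atomic_fetch_add(&tc->refcount, 1); }
static void thin_put(struct thin *tc) { atomic_fetch_sub(&tc->refcount, 1); /* last put would signal the destructor */ }

/* Grab a reference under the lock, then drop the lock before blocking work. */
static struct thin *get_first(void)
{
	struct thin *tc;

	pthread_mutex_lock(&list_lock);
	tc = active_thins;
	if (tc)
		thin_get(tc);
	pthread_mutex_unlock(&list_lock);
	return tc;
}

static struct thin *get_next(struct thin *old)
{
	struct thin *tc;

	pthread_mutex_lock(&list_lock);
	tc = old->next;
	if (tc)
		thin_get(tc);
	pthread_mutex_unlock(&list_lock);
	thin_put(old);
	return tc;
}

/* Typical walk: the callback may sleep, so no lock is held across it. */
static void walk_all(void (*fn)(struct thin *))
{
	struct thin *tc;

	for (tc = get_first(); tc; tc = get_next(tc))
		fn(tc);
}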
+ */ +static void do_no_space_timeout(struct work_struct *ws) +{ +	struct pool *pool = container_of(to_delayed_work(ws), struct pool, +					 no_space_timeout); + +	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) +		set_pool_mode(pool, PM_READ_ONLY); +} + +/*----------------------------------------------------------------*/ + +struct pool_work { +	struct work_struct worker; +	struct completion complete; +}; + +static struct pool_work *to_pool_work(struct work_struct *ws) +{ +	return container_of(ws, struct pool_work, worker); +} + +static void pool_work_complete(struct pool_work *pw) +{ +	complete(&pw->complete); +} + +static void pool_work_wait(struct pool_work *pw, struct pool *pool, +			   void (*fn)(struct work_struct *)) +{ +	INIT_WORK_ONSTACK(&pw->worker, fn); +	init_completion(&pw->complete); +	queue_work(pool->wq, &pw->worker); +	wait_for_completion(&pw->complete); +} + +/*----------------------------------------------------------------*/ + +struct noflush_work { +	struct pool_work pw; +	struct thin_c *tc; +}; + +static struct noflush_work *to_noflush(struct work_struct *ws) +{ +	return container_of(to_pool_work(ws), struct noflush_work, pw); +} + +static void do_noflush_start(struct work_struct *ws) +{ +	struct noflush_work *w = to_noflush(ws); +	w->tc->requeue_mode = true; +	requeue_io(w->tc); +	pool_work_complete(&w->pw); +} + +static void do_noflush_stop(struct work_struct *ws) +{ +	struct noflush_work *w = to_noflush(ws); +	w->tc->requeue_mode = false; +	pool_work_complete(&w->pw); +} + +static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *)) +{ +	struct noflush_work w; + +	w.tc = tc; +	pool_work_wait(&w.pw, tc->pool, fn); +} +  /*----------------------------------------------------------------*/  static enum pool_mode get_pool_mode(struct pool *pool) @@ -1387,46 +1686,127 @@ static enum pool_mode get_pool_mode(struct pool *pool)  	return pool->pf.mode;  } -static void set_pool_mode(struct pool *pool, enum pool_mode mode) +static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)  { -	int r; +	dm_table_event(pool->ti->table); +	DMINFO("%s: switching pool to %s mode", +	       dm_device_name(pool->pool_md), new_mode); +} -	pool->pf.mode = mode; +static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) +{ +	struct pool_c *pt = pool->ti->private; +	bool needs_check = dm_pool_metadata_needs_check(pool->pmd); +	enum pool_mode old_mode = get_pool_mode(pool); +	unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ; -	switch (mode) { -	case PM_FAIL: -		DMERR("%s: switching pool to failure mode", +	/* +	 * Never allow the pool to transition to PM_WRITE mode if user +	 * intervention is required to verify metadata and data consistency. +	 */ +	if (new_mode == PM_WRITE && needs_check) { +		DMERR("%s: unable to switch pool to write mode until repaired.",  		      dm_device_name(pool->pool_md)); +		if (old_mode != new_mode) +			new_mode = old_mode; +		else +			new_mode = PM_READ_ONLY; +	} +	/* +	 * If we were in PM_FAIL mode, rollback of metadata failed.  We're +	 * not going to recover without a thin_repair.	So we never let the +	 * pool move out of the old mode. 
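[editor's note] pool_work_wait() and the noflush_work helpers above add a small "run this on the pool's worker and block until it has finished" primitive built from an on-stack work item plus a completion. A userspace sketch of the same hand-off, with a pthread condition variable standing in for struct completion (assumed names, not the kernel API):

#include <pthread.h>
#include <stdbool.h>

/* Poor man's struct completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = false;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* The work lives on the caller's stack; the worker signals it when done. */
struct pool_work {
	void (*fn)(struct pool_work *);
	struct completion done;
};

static void pool_work_wait(struct pool_work *pw, void (*queue)(struct pool_work *))
{
	init_completion(&pw->done);
	queue(pw);			/* hand off to the single worker thread */
	wait_for_completion(&pw->done);	/* worker runs pw->fn and calls complete(&pw->done) */
}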
+	 */ +	if (old_mode == PM_FAIL) +		new_mode = old_mode; + +	switch (new_mode) { +	case PM_FAIL: +		if (old_mode != new_mode) +			notify_of_pool_mode_change(pool, "failure"); +		dm_pool_metadata_read_only(pool->pmd);  		pool->process_bio = process_bio_fail;  		pool->process_discard = process_bio_fail;  		pool->process_prepared_mapping = process_prepared_mapping_fail;  		pool->process_prepared_discard = process_prepared_discard_fail; + +		error_retry_list(pool);  		break;  	case PM_READ_ONLY: -		DMERR("%s: switching pool to read-only mode", -		      dm_device_name(pool->pool_md)); -		r = dm_pool_abort_metadata(pool->pmd); -		if (r) { -			DMERR("%s: aborting transaction failed", -			      dm_device_name(pool->pool_md)); -			set_pool_mode(pool, PM_FAIL); -		} else { -			dm_pool_metadata_read_only(pool->pmd); -			pool->process_bio = process_bio_read_only; -			pool->process_discard = process_discard; -			pool->process_prepared_mapping = process_prepared_mapping_fail; -			pool->process_prepared_discard = process_prepared_discard_passdown; -		} +		if (old_mode != new_mode) +			notify_of_pool_mode_change(pool, "read-only"); +		dm_pool_metadata_read_only(pool->pmd); +		pool->process_bio = process_bio_read_only; +		pool->process_discard = process_bio_success; +		pool->process_prepared_mapping = process_prepared_mapping_fail; +		pool->process_prepared_discard = process_prepared_discard_passdown; + +		error_retry_list(pool); +		break; + +	case PM_OUT_OF_DATA_SPACE: +		/* +		 * Ideally we'd never hit this state; the low water mark +		 * would trigger userland to extend the pool before we +		 * completely run out of data space.  However, many small +		 * IOs to unprovisioned space can consume data space at an +		 * alarming rate.  Adjust your low water mark if you're +		 * frequently seeing this mode. +		 */ +		if (old_mode != new_mode) +			notify_of_pool_mode_change(pool, "out-of-data-space"); +		pool->process_bio = process_bio_read_only; +		pool->process_discard = process_discard; +		pool->process_prepared_mapping = process_prepared_mapping; +		pool->process_prepared_discard = process_prepared_discard_passdown; + +		if (!pool->pf.error_if_no_space && no_space_timeout) +			queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);  		break;  	case PM_WRITE: +		if (old_mode != new_mode) +			notify_of_pool_mode_change(pool, "write"); +		dm_pool_metadata_read_write(pool->pmd);  		pool->process_bio = process_bio;  		pool->process_discard = process_discard;  		pool->process_prepared_mapping = process_prepared_mapping;  		pool->process_prepared_discard = process_prepared_discard;  		break;  	} + +	pool->pf.mode = new_mode; +	/* +	 * The pool mode may have changed, sync it so bind_control_target() +	 * doesn't cause an unexpected mode transition on resume. 
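[editor's note] The rewritten set_pool_mode() above is effectively a guarded state machine: PM_FAIL is sticky, and a pool whose metadata carries the needs_check flag is never allowed back into PM_WRITE. A compact sketch of just those transition guards (simplified, no function-pointer wiring):

#include <stdbool.h>

enum pool_mode { PM_WRITE, PM_OUT_OF_DATA_SPACE, PM_READ_ONLY, PM_FAIL };

/*
 * Decide which mode the pool may actually enter, given the requested mode,
 * the current mode and whether the metadata is flagged as needing a check.
 */
static enum pool_mode clamp_new_mode(enum pool_mode old_mode,
				     enum pool_mode requested,
				     bool needs_check)
{
	/* Never re-enter write mode until the metadata has been repaired. */
	if (requested == PM_WRITE && needs_check)
		return (old_mode != requested) ? old_mode : PM_READ_ONLY;

	/* PM_FAIL is sticky: only thin_repair and a reload get you out. */
	if (old_mode == PM_FAIL)
		return old_mode;

	return requested;
}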
+	 */ +	pt->adjusted_pf.mode = new_mode; +} + +static void abort_transaction(struct pool *pool) +{ +	const char *dev_name = dm_device_name(pool->pool_md); + +	DMERR_LIMIT("%s: aborting current metadata transaction", dev_name); +	if (dm_pool_abort_metadata(pool->pmd)) { +		DMERR("%s: failed to abort metadata transaction", dev_name); +		set_pool_mode(pool, PM_FAIL); +	} + +	if (dm_pool_metadata_set_needs_check(pool->pmd)) { +		DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); +		set_pool_mode(pool, PM_FAIL); +	} +} + +static void metadata_operation_failed(struct pool *pool, const char *op, int r) +{ +	DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d", +		    dm_device_name(pool->pool_md), op, r); + +	abort_transaction(pool); +	set_pool_mode(pool, PM_READ_ONLY);  }  /*----------------------------------------------------------------*/ @@ -1443,9 +1823,9 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)  	unsigned long flags;  	struct pool *pool = tc->pool; -	spin_lock_irqsave(&pool->lock, flags); -	bio_list_add(&pool->deferred_bios, bio); -	spin_unlock_irqrestore(&pool->lock, flags); +	spin_lock_irqsave(&tc->lock, flags); +	bio_list_add(&tc->deferred_bio_list, bio); +	spin_unlock_irqrestore(&tc->lock, flags);  	wake_worker(pool);  } @@ -1476,6 +1856,11 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)  	thin_hook_bio(tc, bio); +	if (tc->requeue_mode) { +		bio_endio(bio, DM_ENDIO_REQUEUE); +		return DM_MAPIO_SUBMITTED; +	} +  	if (get_pool_mode(tc->pool) == PM_FAIL) {  		bio_io_error(bio);  		return DM_MAPIO_SUBMITTED; @@ -1533,9 +1918,9 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)  		if (get_pool_mode(tc->pool) == PM_READ_ONLY) {  			/*  			 * This block isn't provisioned, and we have no way -			 * of doing so.  Just error it. +			 * of doing so.  			 
*/ -			bio_io_error(bio); +			handle_unserviceable_bio(tc->pool, bio);  			return DM_MAPIO_SUBMITTED;  		}  		/* fall through */ @@ -1561,26 +1946,29 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)  static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)  { -	int r; -	unsigned long flags;  	struct pool_c *pt = container_of(cb, struct pool_c, callbacks); +	struct request_queue *q; -	spin_lock_irqsave(&pt->pool->lock, flags); -	r = !bio_list_empty(&pt->pool->retry_on_resume_list); -	spin_unlock_irqrestore(&pt->pool->lock, flags); +	if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE) +		return 1; -	if (!r) { -		struct request_queue *q = bdev_get_queue(pt->data_dev->bdev); -		r = bdi_congested(&q->backing_dev_info, bdi_bits); -	} - -	return r; +	q = bdev_get_queue(pt->data_dev->bdev); +	return bdi_congested(&q->backing_dev_info, bdi_bits);  } -static void __requeue_bios(struct pool *pool) +static void requeue_bios(struct pool *pool)  { -	bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list); -	bio_list_init(&pool->retry_on_resume_list); +	unsigned long flags; +	struct thin_c *tc; + +	rcu_read_lock(); +	list_for_each_entry_rcu(tc, &pool->active_thins, list) { +		spin_lock_irqsave(&tc->lock, flags); +		bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list); +		bio_list_init(&tc->retry_on_resume_list); +		spin_unlock_irqrestore(&tc->lock, flags); +	} +	rcu_read_unlock();  }  /*---------------------------------------------------------------- @@ -1637,17 +2025,21 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)  	struct pool_c *pt = ti->private;  	/* -	 * We want to make sure that degraded pools are never upgraded. +	 * We want to make sure that a pool in PM_FAIL mode is never upgraded.  	 */ -	enum pool_mode old_mode = pool->pf.mode; +	enum pool_mode old_mode = get_pool_mode(pool);  	enum pool_mode new_mode = pt->adjusted_pf.mode; -	if (old_mode > new_mode) -		new_mode = old_mode; +	/* +	 * Don't change the pool's mode until set_pool_mode() below. +	 * Otherwise the pool's process_* function pointers may +	 * not match the desired pool mode. 
+	 */ +	pt->adjusted_pf.mode = old_mode;  	pool->ti = ti; -	pool->low_water_blocks = pt->low_water_blocks;  	pool->pf = pt->adjusted_pf; +	pool->low_water_blocks = pt->low_water_blocks;  	set_pool_mode(pool, new_mode); @@ -1670,6 +2062,7 @@ static void pool_features_init(struct pool_features *pf)  	pf->zero_new_blocks = true;  	pf->discard_enabled = true;  	pf->discard_passdown = true; +	pf->error_if_no_space = false;  }  static void __pool_destroy(struct pool *pool) @@ -1755,14 +2148,13 @@ static struct pool *pool_create(struct mapped_device *pool_md,  	INIT_WORK(&pool->worker, do_worker);  	INIT_DELAYED_WORK(&pool->waker, do_waker); +	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);  	spin_lock_init(&pool->lock); -	bio_list_init(&pool->deferred_bios);  	bio_list_init(&pool->deferred_flush_bios);  	INIT_LIST_HEAD(&pool->prepared_mappings);  	INIT_LIST_HEAD(&pool->prepared_discards); -	pool->low_water_triggered = 0; -	pool->no_free_space = 0; -	bio_list_init(&pool->retry_on_resume_list); +	INIT_LIST_HEAD(&pool->active_thins); +	pool->low_water_triggered = false;  	pool->shared_read_ds = dm_deferred_set_create();  	if (!pool->shared_read_ds) { @@ -1886,7 +2278,7 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,  	const char *arg_name;  	static struct dm_arg _args[] = { -		{0, 3, "Invalid number of pool feature arguments"}, +		{0, 4, "Invalid number of pool feature arguments"},  	};  	/* @@ -1915,6 +2307,9 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,  		else if (!strcasecmp(arg_name, "read_only"))  			pf->mode = PM_READ_ONLY; +		else if (!strcasecmp(arg_name, "error_if_no_space")) +			pf->error_if_no_space = true; +  		else {  			ti->error = "Unrecognised pool feature requested";  			r = -EINVAL; @@ -1935,16 +2330,27 @@ static void metadata_low_callback(void *context)  	dm_table_event(pool->ti->table);  } -static sector_t get_metadata_dev_size(struct block_device *bdev) +static sector_t get_dev_size(struct block_device *bdev) +{ +	return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; +} + +static void warn_if_metadata_device_too_big(struct block_device *bdev)  { -	sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; +	sector_t metadata_dev_size = get_dev_size(bdev);  	char buffer[BDEVNAME_SIZE]; -	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) { +	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)  		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",  		       bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS); -		metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING; -	} +} + +static sector_t get_metadata_dev_size(struct block_device *bdev) +{ +	sector_t metadata_dev_size = get_dev_size(bdev); + +	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS) +		metadata_dev_size = THIN_METADATA_MAX_SECTORS;  	return metadata_dev_size;  } @@ -1953,7 +2359,7 @@ static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)  {  	sector_t metadata_dev_size = get_metadata_dev_size(bdev); -	sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); +	sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);  	return metadata_dev_size;  } @@ -1985,6 +2391,8 @@ static dm_block_t calc_metadata_threshold(struct pool_c *pt)   *	     skip_block_zeroing: skips the zeroing of newly-provisioned blocks.   
*	     ignore_discard: disable discard   *	     no_discard_passdown: don't pass discards down to the data device + *	     read_only: Don't allow any changes to be made to the pool metadata. + *	     error_if_no_space: error IOs, instead of queueing, if no space.   */  static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)  { @@ -2029,12 +2437,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)  		ti->error = "Error opening metadata block device";  		goto out_unlock;  	} - -	/* -	 * Run for the side-effect of possibly issuing a warning if the -	 * device is too big. -	 */ -	(void) get_metadata_dev_size(metadata_dev->bdev); +	warn_if_metadata_device_too_big(metadata_dev->bdev);  	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);  	if (r) { @@ -2095,6 +2498,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)  	 * them down to the data device.  The thin device's discard  	 * processing will cause mappings to be removed from the btree.  	 */ +	ti->discard_zeroes_data_unsupported = true;  	if (pf.discard_enabled && pf.discard_passdown) {  		ti->num_discard_bios = 1; @@ -2104,7 +2508,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)  		 * thin devices' discard limits consistent).  		 */  		ti->discards_supported = true; -		ti->discard_zeroes_data_unsupported = true;  	}  	ti->private = pt; @@ -2180,11 +2583,19 @@ static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)  		return -EINVAL;  	} else if (data_size > sb_data_size) { +		if (dm_pool_metadata_needs_check(pool->pmd)) { +			DMERR("%s: unable to grow the data device until repaired.", +			      dm_device_name(pool->pool_md)); +			return 0; +		} + +		if (sb_data_size) +			DMINFO("%s: growing the data device from %llu to %llu blocks", +			       dm_device_name(pool->pool_md), +			       sb_data_size, (unsigned long long)data_size);  		r = dm_pool_resize_data_dev(pool->pmd, data_size);  		if (r) { -			DMERR("%s: failed to resize data device", -			      dm_device_name(pool->pool_md)); -			set_pool_mode(pool, PM_READ_ONLY); +			metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);  			return r;  		} @@ -2219,10 +2630,19 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)  		return -EINVAL;  	} else if (metadata_dev_size > sb_metadata_dev_size) { +		if (dm_pool_metadata_needs_check(pool->pmd)) { +			DMERR("%s: unable to grow the metadata device until repaired.", +			      dm_device_name(pool->pool_md)); +			return 0; +		} + +		warn_if_metadata_device_too_big(pool->md_dev); +		DMINFO("%s: growing the metadata device from %llu to %llu blocks", +		       dm_device_name(pool->pool_md), +		       sb_metadata_dev_size, metadata_dev_size);  		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);  		if (r) { -			DMERR("%s: failed to resize metadata device", -			      dm_device_name(pool->pool_md)); +			metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);  			return r;  		} @@ -2266,7 +2686,7 @@ static int pool_preresume(struct dm_target *ti)  		return r;  	if (need_commit1 || need_commit2) -		(void) commit_or_fallback(pool); +		(void) commit(pool);  	return 0;  } @@ -2278,10 +2698,9 @@ static void pool_resume(struct dm_target *ti)  	unsigned long flags;  	spin_lock_irqsave(&pool->lock, flags); -	pool->low_water_triggered = 0; -	pool->no_free_space = 0; -	__requeue_bios(pool); +	pool->low_water_triggered = false;  	spin_unlock_irqrestore(&pool->lock, flags); +	
requeue_bios(pool);  	do_waker(&pool->waker.work);  } @@ -2292,8 +2711,9 @@ static void pool_postsuspend(struct dm_target *ti)  	struct pool *pool = pt->pool;  	cancel_delayed_work(&pool->waker); +	cancel_delayed_work(&pool->no_space_timeout);  	flush_workqueue(pool->wq); -	(void) commit_or_fallback(pool); +	(void) commit(pool);  }  static int check_arg_count(unsigned argc, unsigned args_required) @@ -2427,7 +2847,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct  	if (r)  		return r; -	(void) commit_or_fallback(pool); +	(void) commit(pool);  	r = dm_pool_reserve_metadata_snap(pool->pmd);  	if (r) @@ -2489,7 +2909,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)  		DMWARN("Unrecognised thin pool target message received: %s", argv[0]);  	if (!r) -		(void) commit_or_fallback(pool); +		(void) commit(pool);  	return r;  } @@ -2498,7 +2918,8 @@ static void emit_flags(struct pool_features *pf, char *result,  		       unsigned sz, unsigned maxlen)  {  	unsigned count = !pf->zero_new_blocks + !pf->discard_enabled + -		!pf->discard_passdown + (pf->mode == PM_READ_ONLY); +		!pf->discard_passdown + (pf->mode == PM_READ_ONLY) + +		pf->error_if_no_space;  	DMEMIT("%u ", count);  	if (!pf->zero_new_blocks) @@ -2512,6 +2933,9 @@ static void emit_flags(struct pool_features *pf, char *result,  	if (pf->mode == PM_READ_ONLY)  		DMEMIT("read_only "); + +	if (pf->error_if_no_space) +		DMEMIT("error_if_no_space ");  }  /* @@ -2544,7 +2968,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,  		/* Commit to ensure statistics aren't out-of-date */  		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) -			(void) commit_or_fallback(pool); +			(void) commit(pool);  		r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);  		if (r) { @@ -2600,17 +3024,24 @@ static void pool_status(struct dm_target *ti, status_type_t type,  		else  			DMEMIT("- "); -		if (pool->pf.mode == PM_READ_ONLY) +		if (pool->pf.mode == PM_OUT_OF_DATA_SPACE) +			DMEMIT("out_of_data_space "); +		else if (pool->pf.mode == PM_READ_ONLY)  			DMEMIT("ro ");  		else  			DMEMIT("rw ");  		if (!pool->pf.discard_enabled) -			DMEMIT("ignore_discard"); +			DMEMIT("ignore_discard ");  		else if (pool->pf.discard_passdown) -			DMEMIT("discard_passdown"); +			DMEMIT("discard_passdown "); +		else +			DMEMIT("no_discard_passdown "); + +		if (pool->pf.error_if_no_space) +			DMEMIT("error_if_no_space ");  		else -			DMEMIT("no_discard_passdown"); +			DMEMIT("queue_if_no_space ");  		break; @@ -2663,7 +3094,8 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)  	 */  	if (pt->adjusted_pf.discard_passdown) {  		data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits; -		limits->discard_granularity = data_limits->discard_granularity; +		limits->discard_granularity = max(data_limits->discard_granularity, +						  pool->sectors_per_block << SECTOR_SHIFT);  	} else  		limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;  } @@ -2689,8 +3121,16 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)  	 * They get transferred to the live pool in bind_control_target()  	 * called from pool_preresume().  	 */ -	if (!pt->adjusted_pf.discard_enabled) +	if (!pt->adjusted_pf.discard_enabled) { +		/* +		 * Must explicitly disallow stacking discard limits otherwise the +		 * block layer will stack them if pool's data device has support. 
+		 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the +		 * user to see that, so make sure to set all discard limits to 0. +		 */ +		limits->discard_granularity = 0;  		return; +	}  	disable_passdown_if_not_supported(pt); @@ -2701,7 +3141,7 @@ static struct target_type pool_target = {  	.name = "thin-pool",  	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |  		    DM_TARGET_IMMUTABLE, -	.version = {1, 9, 0}, +	.version = {1, 12, 0},  	.module = THIS_MODULE,  	.ctr = pool_ctr,  	.dtr = pool_dtr, @@ -2719,9 +3159,29 @@ static struct target_type pool_target = {  /*----------------------------------------------------------------   * Thin target methods   *--------------------------------------------------------------*/ +static void thin_get(struct thin_c *tc) +{ +	atomic_inc(&tc->refcount); +} + +static void thin_put(struct thin_c *tc) +{ +	if (atomic_dec_and_test(&tc->refcount)) +		complete(&tc->can_destroy); +} +  static void thin_dtr(struct dm_target *ti)  {  	struct thin_c *tc = ti->private; +	unsigned long flags; + +	thin_put(tc); +	wait_for_completion(&tc->can_destroy); + +	spin_lock_irqsave(&tc->pool->lock, flags); +	list_del_rcu(&tc->list); +	spin_unlock_irqrestore(&tc->pool->lock, flags); +	synchronize_rcu();  	mutex_lock(&dm_thin_pool_table.mutex); @@ -2753,6 +3213,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)  	struct thin_c *tc;  	struct dm_dev *pool_dev, *origin_dev;  	struct mapped_device *pool_md; +	unsigned long flags;  	mutex_lock(&dm_thin_pool_table.mutex); @@ -2768,6 +3229,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)  		r = -ENOMEM;  		goto out_unlock;  	} +	spin_lock_init(&tc->lock); +	bio_list_init(&tc->deferred_bio_list); +	bio_list_init(&tc->retry_on_resume_list); +	tc->sort_bio_list = RB_ROOT;  	if (argc == 3) {  		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev); @@ -2808,6 +3273,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)  	if (get_pool_mode(tc->pool) == PM_FAIL) {  		ti->error = "Couldn't open thin device, Pool is in fail mode"; +		r = -EINVAL;  		goto bad_thin_open;  	} @@ -2819,17 +3285,17 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)  	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);  	if (r) -		goto bad_thin_open; +		goto bad_target_max_io_len;  	ti->num_flush_bios = 1;  	ti->flush_supported = true;  	ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);  	/* In case the pool supports discards, pass them on. */ +	ti->discard_zeroes_data_unsupported = true;  	if (tc->pool->pf.discard_enabled) {  		ti->discards_supported = true;  		ti->num_discard_bios = 1; -		ti->discard_zeroes_data_unsupported = true;  		/* Discard bios must be split on a block boundary */  		ti->split_discard_bios = true;  	} @@ -2838,8 +3304,24 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)  	mutex_unlock(&dm_thin_pool_table.mutex); +	atomic_set(&tc->refcount, 1); +	init_completion(&tc->can_destroy); + +	spin_lock_irqsave(&tc->pool->lock, flags); +	list_add_tail_rcu(&tc->list, &tc->pool->active_thins); +	spin_unlock_irqrestore(&tc->pool->lock, flags); +	/* +	 * This synchronize_rcu() call is needed here otherwise we risk a +	 * wake_worker() call finding no bios to process (because the newly +	 * added tc isn't yet visible).  So this reduces latency since we +	 * aren't then dependent on the periodic commit to wake_worker(). 
+	 */ +	synchronize_rcu(); +  	return 0; +bad_target_max_io_len: +	dm_pool_close_thin_device(tc->td);  bad_thin_open:  	__pool_dec(tc->pool);  bad_pool_lookup: @@ -2859,7 +3341,7 @@ out_unlock:  static int thin_map(struct dm_target *ti, struct bio *bio)  { -	bio->bi_sector = dm_target_offset(ti, bio->bi_sector); +	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);  	return thin_bio_map(ti, bio);  } @@ -2879,7 +3361,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)  		spin_lock_irqsave(&pool->lock, flags);  		list_for_each_entry_safe(m, tmp, &work, list) {  			list_del(&m->list); -			m->quiesced = 1; +			m->quiesced = true;  			__maybe_add_mapping(m);  		}  		spin_unlock_irqrestore(&pool->lock, flags); @@ -2891,7 +3373,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)  		if (!list_empty(&work)) {  			spin_lock_irqsave(&pool->lock, flags);  			list_for_each_entry_safe(m, tmp, &work, list) -				list_add(&m->list, &pool->prepared_discards); +				list_add_tail(&m->list, &pool->prepared_discards);  			spin_unlock_irqrestore(&pool->lock, flags);  			wake_worker(pool);  		} @@ -2900,10 +3382,23 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)  	return 0;  } -static void thin_postsuspend(struct dm_target *ti) +static void thin_presuspend(struct dm_target *ti)  { +	struct thin_c *tc = ti->private; +  	if (dm_noflush_suspending(ti)) -		requeue_io((struct thin_c *)ti->private); +		noflush_work(tc, do_noflush_start); +} + +static void thin_postsuspend(struct dm_target *ti) +{ +	struct thin_c *tc = ti->private; + +	/* +	 * The dm_noflush_suspending flag has been cleared by now, so +	 * unfortunately we must always run this. +	 */ +	noflush_work(tc, do_noflush_stop);  }  /* @@ -2988,12 +3483,13 @@ static int thin_iterate_devices(struct dm_target *ti,  static struct target_type thin_target = {  	.name = "thin", -	.version = {1, 9, 0}, +	.version = {1, 12, 0},  	.module	= THIS_MODULE,  	.ctr = thin_ctr,  	.dtr = thin_dtr,  	.map = thin_map,  	.end_io = thin_endio, +	.presuspend = thin_presuspend,  	.postsuspend = thin_postsuspend,  	.status = thin_status,  	.iterate_devices = thin_iterate_devices, @@ -3042,6 +3538,9 @@ static void dm_thin_exit(void)  module_init(dm_thin_init);  module_exit(dm_thin_exit); +module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds"); +  MODULE_DESCRIPTION(DM_NAME " thin provisioning target");  MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");  MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index 4b7941db3af..7a7bab8947a 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c @@ -73,15 +73,10 @@ struct dm_verity_io {  	sector_t block;  	unsigned n_blocks; -	/* saved bio vector */ -	struct bio_vec *io_vec; -	unsigned io_vec_size; +	struct bvec_iter iter;  	struct work_struct work; -	/* A space for short vectors; longer vectors are allocated separately. 
*/ -	struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE]; -  	/*  	 * Three variably-size fields follow this struct:  	 * @@ -284,9 +279,10 @@ release_ret_r:  static int verity_verify_io(struct dm_verity_io *io)  {  	struct dm_verity *v = io->v; +	struct bio *bio = dm_bio_from_per_bio_data(io, +						   v->ti->per_bio_data_size);  	unsigned b;  	int i; -	unsigned vector = 0, offset = 0;  	for (b = 0; b < io->n_blocks; b++) {  		struct shash_desc *desc; @@ -334,31 +330,25 @@ test_block_hash:  				return r;  			}  		} -  		todo = 1 << v->data_dev_block_bits;  		do { -			struct bio_vec *bv;  			u8 *page;  			unsigned len; +			struct bio_vec bv = bio_iter_iovec(bio, io->iter); -			BUG_ON(vector >= io->io_vec_size); -			bv = &io->io_vec[vector]; -			page = kmap_atomic(bv->bv_page); -			len = bv->bv_len - offset; +			page = kmap_atomic(bv.bv_page); +			len = bv.bv_len;  			if (likely(len >= todo))  				len = todo; -			r = crypto_shash_update(desc, -					page + bv->bv_offset + offset, len); +			r = crypto_shash_update(desc, page + bv.bv_offset, len);  			kunmap_atomic(page); +  			if (r < 0) {  				DMERR("crypto_shash_update failed: %d", r);  				return r;  			} -			offset += len; -			if (likely(offset == bv->bv_len)) { -				offset = 0; -				vector++; -			} + +			bio_advance_iter(bio, &io->iter, len);  			todo -= len;  		} while (todo); @@ -383,8 +373,6 @@ test_block_hash:  			return -EIO;  		}  	} -	BUG_ON(vector != io->io_vec_size); -	BUG_ON(offset);  	return 0;  } @@ -400,10 +388,7 @@ static void verity_finish_io(struct dm_verity_io *io, int error)  	bio->bi_end_io = io->orig_bi_end_io;  	bio->bi_private = io->orig_bi_private; -	if (io->io_vec != io->io_vec_inline) -		mempool_free(io->io_vec, v->vec_mempool); - -	bio_endio(bio, error); +	bio_endio_nodec(bio, error);  }  static void verity_work(struct work_struct *w) @@ -493,9 +478,9 @@ static int verity_map(struct dm_target *ti, struct bio *bio)  	struct dm_verity_io *io;  	bio->bi_bdev = v->data_dev->bdev; -	bio->bi_sector = verity_map_sector(v, bio->bi_sector); +	bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector); -	if (((unsigned)bio->bi_sector | bio_sectors(bio)) & +	if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &  	    ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {  		DMERR_LIMIT("unaligned io");  		return -EIO; @@ -514,18 +499,12 @@ static int verity_map(struct dm_target *ti, struct bio *bio)  	io->v = v;  	io->orig_bi_end_io = bio->bi_end_io;  	io->orig_bi_private = bio->bi_private; -	io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); -	io->n_blocks = bio->bi_size >> v->data_dev_block_bits; +	io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); +	io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;  	bio->bi_end_io = verity_end_io;  	bio->bi_private = io; -	io->io_vec_size = bio_segments(bio); -	if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE) -		io->io_vec = io->io_vec_inline; -	else -		io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO); -	memcpy(io->io_vec, bio_iovec(bio), -	       io->io_vec_size * sizeof(struct bio_vec)); +	io->iter = bio->bi_iter;  	verity_submit_prefetch(v, io); diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c index c99003e0d47..b9a64bbce30 100644 --- a/drivers/md/dm-zero.c +++ b/drivers/md/dm-zero.c @@ -1,5 +1,5 @@  /* - * Copyright (C) 2003 Christophe Saout <christophe@saout.de> + * Copyright (C) 2003 Jana Saout <jana@saout.de>   *   * This file is released under the GPL.   
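[editor's note] The dm-verity rework above is part of the immutable-biovec series: instead of saving a private copy of the bio's vector and tracking a (vector, offset) pair by hand, the target keeps a bvec_iter and walks it with bio_iter_iovec()/bio_advance_iter(). A userspace sketch of the iterator idea over a plain segment array (an analogue, not the block-layer API):

#include <stdio.h>
#include <stddef.h>

struct seg { const char *base; size_t len; };

/* Analogue of bvec_iter: which segment we are in and how far into it. */
struct seg_iter { size_t idx; size_t offset; };

/* Current contiguous chunk, like bio_iter_iovec(). */
static struct seg iter_cur(const struct seg *segs, struct seg_iter it)
{
	struct seg s = { segs[it.idx].base + it.offset,
			 segs[it.idx].len - it.offset };
	return s;
}

/* Advance by an arbitrary byte count, like bio_advance_iter(). */
static void iter_advance(const struct seg *segs, struct seg_iter *it, size_t bytes)
{
	while (bytes) {
		size_t step = segs[it->idx].len - it->offset;

		if (step > bytes)
			step = bytes;
		it->offset += step;
		bytes -= step;
		if (it->offset == segs[it->idx].len) {
			it->idx++;
			it->offset = 0;
		}
	}
}

int main(void)
{
	struct seg segs[] = { { "hello ", 6 }, { "iter ", 5 }, { "world", 5 } };
	struct seg_iter it = { 0, 0 };
	size_t left = 16, chunk = 4;

	/* Consume the segments in fixed-size chunks regardless of segment boundaries. */
	while (left) {
		struct seg cur = iter_cur(segs, it);
		size_t n = cur.len < chunk ? cur.len : chunk;

		if (n > left)
			n = left;
		printf("%.*s\n", (int)n, cur.base);
		iter_advance(segs, &it, n);
		left -= n;
	}
	return 0;
}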
*/ @@ -79,6 +79,6 @@ static void __exit dm_zero_exit(void)  module_init(dm_zero_init)  module_exit(dm_zero_exit) -MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); +MODULE_AUTHOR("Jana Saout <jana@saout.de>");  MODULE_DESCRIPTION(DM_NAME " dummy target returning zeros");  MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6a5e9ed2fcc..32b958dbc49 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -49,6 +49,13 @@ static unsigned int _major = 0;  static DEFINE_IDR(_minor_idr);  static DEFINE_SPINLOCK(_minor_lock); + +static void do_deferred_remove(struct work_struct *w); + +static DECLARE_WORK(deferred_remove_work, do_deferred_remove); + +static struct workqueue_struct *deferred_remove_workqueue; +  /*   * For bio-based dm.   * One of these is allocated per bio. @@ -89,13 +96,6 @@ struct dm_rq_clone_bio_info {  	struct bio clone;  }; -union map_info *dm_get_mapinfo(struct bio *bio) -{ -	if (bio && bio->bi_private) -		return &((struct dm_target_io *)bio->bi_private)->info; -	return NULL; -} -  union map_info *dm_get_rq_mapinfo(struct request *rq)  {  	if (rq && rq->end_io_data) @@ -116,6 +116,7 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);  #define DMF_DELETING 4  #define DMF_NOFLUSH_SUSPENDING 5  #define DMF_MERGE_IS_OPTIONAL 6 +#define DMF_DEFERRED_REMOVE 7  /*   * A dummy definition to make RCU happy. @@ -194,8 +195,8 @@ struct mapped_device {  	/* forced geometry settings */  	struct hd_geometry geometry; -	/* sysfs handle */ -	struct kobject kobj; +	/* kobject and completion */ +	struct dm_kobject_holder kobj_holder;  	/* zero-length flush that will be cloned and submitted to targets */  	struct bio flush_bio; @@ -211,10 +212,55 @@ struct dm_md_mempools {  	struct bio_set *bs;  }; -#define MIN_IOS 256 +#define RESERVED_BIO_BASED_IOS		16 +#define RESERVED_REQUEST_BASED_IOS	256 +#define RESERVED_MAX_IOS		1024  static struct kmem_cache *_io_cache;  static struct kmem_cache *_rq_tio_cache; +/* + * Bio-based DM's mempools' reserved IOs set by the user. + */ +static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; + +/* + * Request-based DM's mempools' reserved IOs set by the user. 
+ */ +static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS; + +static unsigned __dm_get_reserved_ios(unsigned *reserved_ios, +				      unsigned def, unsigned max) +{ +	unsigned ios = ACCESS_ONCE(*reserved_ios); +	unsigned modified_ios = 0; + +	if (!ios) +		modified_ios = def; +	else if (ios > max) +		modified_ios = max; + +	if (modified_ios) { +		(void)cmpxchg(reserved_ios, ios, modified_ios); +		ios = modified_ios; +	} + +	return ios; +} + +unsigned dm_get_reserved_bio_based_ios(void) +{ +	return __dm_get_reserved_ios(&reserved_bio_based_ios, +				     RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS); +} +EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); + +unsigned dm_get_reserved_rq_based_ios(void) +{ +	return __dm_get_reserved_ios(&reserved_rq_based_ios, +				     RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS); +} +EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios); +  static int __init local_init(void)  {  	int r = -ENOMEM; @@ -232,16 +278,24 @@ static int __init local_init(void)  	if (r)  		goto out_free_rq_tio_cache; +	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); +	if (!deferred_remove_workqueue) { +		r = -ENOMEM; +		goto out_uevent_exit; +	} +  	_major = major;  	r = register_blkdev(_major, _name);  	if (r < 0) -		goto out_uevent_exit; +		goto out_free_workqueue;  	if (!_major)  		_major = r;  	return 0; +out_free_workqueue: +	destroy_workqueue(deferred_remove_workqueue);  out_uevent_exit:  	dm_uevent_exit();  out_free_rq_tio_cache: @@ -254,6 +308,9 @@ out_free_io_cache:  static void local_exit(void)  { +	flush_scheduled_work(); +	destroy_workqueue(deferred_remove_workqueue); +  	kmem_cache_destroy(_rq_tio_cache);  	kmem_cache_destroy(_io_cache);  	unregister_blkdev(_major, _name); @@ -359,7 +416,10 @@ static void dm_blk_close(struct gendisk *disk, fmode_t mode)  	spin_lock(&_minor_lock); -	atomic_dec(&md->open_count); +	if (atomic_dec_and_test(&md->open_count) && +	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) +		queue_work(deferred_remove_workqueue, &deferred_remove_work); +  	dm_put(md);  	spin_unlock(&_minor_lock); @@ -373,14 +433,18 @@ int dm_open_count(struct mapped_device *md)  /*   * Guarantees nothing is using the device before it's deleted.   
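[editor's note] The reserved_bio_based_ios / reserved_rq_based_ios tunables above are writable at runtime, so __dm_get_reserved_ios() reads the value once and clamps out-of-range settings back with cmpxchg rather than taking a lock. A userspace sketch of that read-validate-cmpxchg pattern using C11 atomics (an analogue of the kernel helper):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned reserved_ios = 0;	/* user tunable, 0 means "use the default" */

static unsigned get_reserved_ios(unsigned def, unsigned max)
{
	unsigned ios = atomic_load(&reserved_ios);
	unsigned fixed = 0;

	if (!ios)
		fixed = def;
	else if (ios > max)
		fixed = max;

	if (fixed) {
		/*
		 * Best-effort write-back of the sanitised value; if the user
		 * changed the tunable meanwhile, their value wins next time.
		 */
		atomic_compare_exchange_strong(&reserved_ios, &ios, fixed);
		ios = fixed;
	}

	return ios;
}

int main(void)
{
	printf("%u\n", get_reserved_ios(16, 1024));	/* 16: default applied */
	atomic_store(&reserved_ios, 5000);
	printf("%u\n", get_reserved_ios(16, 1024));	/* 1024: clamped to max */
	return 0;
}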
*/ -int dm_lock_for_deletion(struct mapped_device *md) +int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)  {  	int r = 0;  	spin_lock(&_minor_lock); -	if (dm_open_count(md)) +	if (dm_open_count(md)) {  		r = -EBUSY; +		if (mark_deferred) +			set_bit(DMF_DEFERRED_REMOVE, &md->flags); +	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) +		r = -EEXIST;  	else  		set_bit(DMF_DELETING, &md->flags); @@ -389,11 +453,37 @@ int dm_lock_for_deletion(struct mapped_device *md)  	return r;  } +int dm_cancel_deferred_remove(struct mapped_device *md) +{ +	int r = 0; + +	spin_lock(&_minor_lock); + +	if (test_bit(DMF_DELETING, &md->flags)) +		r = -EBUSY; +	else +		clear_bit(DMF_DEFERRED_REMOVE, &md->flags); + +	spin_unlock(&_minor_lock); + +	return r; +} + +static void do_deferred_remove(struct work_struct *w) +{ +	dm_deferred_remove(); +} +  sector_t dm_get_size(struct mapped_device *md)  {  	return get_capacity(md->disk);  } +struct request_queue *dm_get_md_queue(struct mapped_device *md) +{ +	return md->queue; +} +  struct dm_stats *dm_get_stats(struct mapped_device *md)  {  	return &md->stats; @@ -494,7 +584,7 @@ static void start_io_acct(struct dm_io *io)  		atomic_inc_return(&md->pending[rw]));  	if (unlikely(dm_stats_used(&md->stats))) -		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, +		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,  				    bio_sectors(bio), false, 0, &io->stats_aux);  } @@ -512,7 +602,7 @@ static void end_io_acct(struct dm_io *io)  	part_stat_unlock();  	if (unlikely(dm_stats_used(&md->stats))) -		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, +		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,  				    bio_sectors(bio), true, duration, &io->stats_aux);  	/* @@ -661,7 +751,7 @@ static void dec_pending(struct dm_io *io, int error)  		if (io_error == DM_ENDIO_REQUEUE)  			return; -		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) { +		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {  			/*  			 * Preflush done for flush with data, reissue  			 * without REQ_FLUSH. 
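[editor's note] The DMF_DEFERRED_REMOVE plumbing above lets a deferred remove succeed while the device is still open: the remove request marks the device, and the final close schedules the actual teardown. A small sketch of that open-count/flag interaction (single lock, hypothetical names, simplified from the kernel's bitops):

#include <pthread.h>
#include <stdbool.h>
#include <errno.h>

struct md {
	pthread_mutex_t lock;
	int open_count;
	bool deferred_remove;
	bool deleting;
};

/* Remove request: fail if still open, or mark it for removal on last close. */
static int lock_for_deletion(struct md *md, bool mark_deferred)
{
	int r = 0;

	pthread_mutex_lock(&md->lock);
	if (md->open_count) {
		r = -EBUSY;
		if (mark_deferred)
			md->deferred_remove = true;
	} else {
		md->deleting = true;
	}
	pthread_mutex_unlock(&md->lock);
	return r;
}

/* Last close triggers the deferred removal. */
static void blk_close(struct md *md, void (*schedule_remove)(struct md *))
{
	bool remove_now;

	pthread_mutex_lock(&md->lock);
	remove_now = (--md->open_count == 0) && md->deferred_remove;
	pthread_mutex_unlock(&md->lock);

	if (remove_now)
		schedule_remove(md);	/* the kernel queues this on a dedicated workqueue */
}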
@@ -676,10 +766,18 @@ static void dec_pending(struct dm_io *io, int error)  	}  } +static void disable_write_same(struct mapped_device *md) +{ +	struct queue_limits *limits = dm_get_queue_limits(md); + +	/* device doesn't really support WRITE SAME, disable it */ +	limits->max_write_same_sectors = 0; +} +  static void clone_endio(struct bio *bio, int error)  {  	int r = 0; -	struct dm_target_io *tio = bio->bi_private; +	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);  	struct dm_io *io = tio->io;  	struct mapped_device *md = tio->io->md;  	dm_endio_fn endio = tio->ti->type->end_io; @@ -704,6 +802,10 @@ static void clone_endio(struct bio *bio, int error)  		}  	} +	if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) && +		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)) +		disable_write_same(md); +  	free_tio(md, tio);  	dec_pending(io, error);  } @@ -713,10 +815,11 @@ static void clone_endio(struct bio *bio, int error)   */  static void end_clone_bio(struct bio *clone, int error)  { -	struct dm_rq_clone_bio_info *info = clone->bi_private; +	struct dm_rq_clone_bio_info *info = +		container_of(clone, struct dm_rq_clone_bio_info, clone);  	struct dm_rq_target_io *tio = info->tio;  	struct bio *bio = info->orig; -	unsigned int nr_bytes = info->orig->bi_size; +	unsigned int nr_bytes = info->orig->bi_iter.bi_size;  	bio_put(clone); @@ -897,6 +1000,10 @@ static void dm_done(struct request *clone, int error, bool mapped)  			r = rq_end_io(tio->ti, clone, error, &tio->info);  	} +	if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) && +		     !clone->q->limits.max_write_same_sectors)) +		disable_write_same(tio->md); +  	if (r <= 0)  		/* The target wants to complete the I/O */  		dm_end_request(clone, r); @@ -1030,6 +1137,46 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)  }  EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); +/* + * A target may call dm_accept_partial_bio only from the map routine.  It is + * allowed for all bio types except REQ_FLUSH. + * + * dm_accept_partial_bio informs the dm that the target only wants to process + * additional n_sectors sectors of the bio and the rest of the data should be + * sent in a next bio. + * + * A diagram that explains the arithmetics: + * +--------------------+---------------+-------+ + * |         1          |       2       |   3   | + * +--------------------+---------------+-------+ + * + * <-------------- *tio->len_ptr ---------------> + *                      <------- bi_size -------> + *                      <-- n_sectors --> + * + * Region 1 was already iterated over with bio_advance or similar function. + *	(it may be empty if the target doesn't use bio_advance) + * Region 2 is the remaining bio size that the target wants to process. + *	(it may be empty if region 1 is non-empty, although there is no reason + *	 to make it empty) + * The target requires that region 3 is to be sent in the next bio. + * + * If the target wants to receive multiple copies of the bio (via num_*bios, etc), + * the partially processed part (the sum of regions 1+2) must be the same for all + * copies of the bio. 
+ */ +void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) +{ +	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); +	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; +	BUG_ON(bio->bi_rw & REQ_FLUSH); +	BUG_ON(bi_size > *tio->len_ptr); +	BUG_ON(n_sectors > bi_size); +	*tio->len_ptr -= bi_size - n_sectors; +	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; +} +EXPORT_SYMBOL_GPL(dm_accept_partial_bio); +  static void __map_bio(struct dm_target_io *tio)  {  	int r; @@ -1039,7 +1186,6 @@ static void __map_bio(struct dm_target_io *tio)  	struct dm_target *ti = tio->ti;  	clone->bi_end_io = clone_endio; -	clone->bi_private = tio;  	/*  	 * Map the clone.  If r == 0 we don't need to do @@ -1047,7 +1193,7 @@ static void __map_bio(struct dm_target_io *tio)  	 * this io.  	 */  	atomic_inc(&tio->io->io_count); -	sector = clone->bi_sector; +	sector = clone->bi_iter.bi_sector;  	r = ti->type->map(ti, clone);  	if (r == DM_MAPIO_REMAPPED) {  		/* the bio has been remapped so dispatch it */ @@ -1073,77 +1219,33 @@ struct clone_info {  	struct bio *bio;  	struct dm_io *io;  	sector_t sector; -	sector_t sector_count; -	unsigned short idx; +	unsigned sector_count;  }; -static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len) -{ -	bio->bi_sector = sector; -	bio->bi_size = to_bytes(len); -} - -static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count) -{ -	bio->bi_idx = idx; -	bio->bi_vcnt = idx + bv_count; -	bio->bi_flags &= ~(1 << BIO_SEG_VALID); -} - -static void clone_bio_integrity(struct bio *bio, struct bio *clone, -				unsigned short idx, unsigned len, unsigned offset, -				unsigned trim) -{ -	if (!bio_integrity(bio)) -		return; - -	bio_integrity_clone(clone, bio, GFP_NOIO); - -	if (trim) -		bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len); -} - -/* - * Creates a little bio that just does part of a bvec. - */ -static void clone_split_bio(struct dm_target_io *tio, struct bio *bio, -			    sector_t sector, unsigned short idx, -			    unsigned offset, unsigned len) +static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)  { -	struct bio *clone = &tio->clone; -	struct bio_vec *bv = bio->bi_io_vec + idx; - -	*clone->bi_io_vec = *bv; - -	bio_setup_sector(clone, sector, len); - -	clone->bi_bdev = bio->bi_bdev; -	clone->bi_rw = bio->bi_rw; -	clone->bi_vcnt = 1; -	clone->bi_io_vec->bv_offset = offset; -	clone->bi_io_vec->bv_len = clone->bi_size; -	clone->bi_flags |= 1 << BIO_CLONED; - -	clone_bio_integrity(bio, clone, idx, len, offset, 1); +	bio->bi_iter.bi_sector = sector; +	bio->bi_iter.bi_size = to_bytes(len);  }  /*   * Creates a bio that consists of range of complete bvecs.   
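[editor's note] dm_accept_partial_bio() above is pure bookkeeping on two numbers: it shrinks the clone to the accepted n_sectors and subtracts the rejected tail from *len_ptr so the core resubmits that tail in the next bio. A worked example of the arithmetic with concrete sector counts (plain integers, not the kernel structures):

#include <assert.h>
#include <stdio.h>

/*
 * len:      sectors the core planned for this clone (*tio->len_ptr)
 * bi_size:  sectors still described by the clone (regions 2+3 in the diagram)
 * accepted: sectors the target is willing to process now (region 2)
 */
static void accept_partial(unsigned *len, unsigned *bi_size, unsigned accepted)
{
	assert(*bi_size <= *len);
	assert(accepted <= *bi_size);

	*len -= *bi_size - accepted;	/* drop the rejected tail (region 3) from the plan */
	*bi_size = accepted;		/* the clone now covers only the accepted part */
}

int main(void)
{
	unsigned len = 2048, bi_size = 1024, accepted = 16;

	accept_partial(&len, &bi_size, accepted);
	printf("len=%u bi_size=%u\n", len, bi_size);	/* len=1040 bi_size=16 */
	return 0;
}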
*/  static void clone_bio(struct dm_target_io *tio, struct bio *bio, -		      sector_t sector, unsigned short idx, -		      unsigned short bv_count, unsigned len) +		      sector_t sector, unsigned len)  {  	struct bio *clone = &tio->clone; -	unsigned trim = 0; -	__bio_clone(clone, bio); -	bio_setup_sector(clone, sector, len); -	bio_setup_bv(clone, idx, bv_count); +	__bio_clone_fast(clone, bio); + +	if (bio_integrity(bio)) +		bio_integrity_clone(clone, bio, GFP_NOIO); -	if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) -		trim = 1; -	clone_bio_integrity(bio, clone, idx, len, 0, trim); +	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); +	clone->bi_iter.bi_size = to_bytes(len); + +	if (bio_integrity(bio)) +		bio_integrity_trim(clone, 0, len);  }  static struct dm_target_io *alloc_tio(struct clone_info *ci, @@ -1158,7 +1260,6 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,  	tio->io = ci->io;  	tio->ti = ti; -	memset(&tio->info, 0, sizeof(tio->info));  	tio->target_bio_nr = target_bio_nr;  	return tio; @@ -1166,25 +1267,27 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,  static void __clone_and_map_simple_bio(struct clone_info *ci,  				       struct dm_target *ti, -				       unsigned target_bio_nr, sector_t len) +				       unsigned target_bio_nr, unsigned *len)  {  	struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);  	struct bio *clone = &tio->clone; +	tio->len_ptr = len; +  	/*  	 * Discard requests require the bio's inline iovecs be initialized.  	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush  	 * and discard, so no need for concern about wasted bvec allocations.  	 */ -	 __bio_clone(clone, ci->bio); +	 __bio_clone_fast(clone, ci->bio);  	if (len) -		bio_setup_sector(clone, ci->sector, len); +		bio_setup_sector(clone, ci->sector, *len);  	__map_bio(tio);  }  static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, -				  unsigned num_bios, sector_t len) +				  unsigned num_bios, unsigned *len)  {  	unsigned target_bio_nr; @@ -1199,16 +1302,13 @@ static int __send_empty_flush(struct clone_info *ci)  	BUG_ON(bio_has_data(ci->bio));  	while ((ti = dm_table_get_target(ci->map, target_nr++))) -		__send_duplicate_bios(ci, ti, ti->num_flush_bios, 0); +		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);  	return 0;  }  static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, -				     sector_t sector, int nr_iovecs, -				     unsigned short idx, unsigned short bv_count, -				     unsigned offset, unsigned len, -				     unsigned split_bvec) +				     sector_t sector, unsigned *len)  {  	struct bio *bio = ci->bio;  	struct dm_target_io *tio; @@ -1222,11 +1322,9 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti  		num_target_bios = ti->num_write_bios(ti, bio);  	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { -		tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr); -		if (split_bvec) -			clone_split_bio(tio, bio, sector, idx, offset, len); -		else -			clone_bio(tio, bio, sector, idx, bv_count, len); +		tio = alloc_tio(ci, ti, 0, target_bio_nr); +		tio->len_ptr = len; +		clone_bio(tio, bio, sector, *len);  		__map_bio(tio);  	}  } @@ -1255,7 +1353,7 @@ static int __send_changing_extent_only(struct clone_info *ci,  				       is_split_required_fn is_split_required)  {  	struct dm_target *ti; -	sector_t len; +	unsigned len;  	unsigned num_bios;  	do { @@ -1274,11 +1372,11 @@ static 
int __send_changing_extent_only(struct clone_info *ci,  			return -EOPNOTSUPP;  		if (is_split_required && !is_split_required(ti)) -			len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); +			len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));  		else -			len = min(ci->sector_count, max_io_len(ci->sector, ti)); +			len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); -		__send_duplicate_bios(ci, ti, num_bios, len); +		__send_duplicate_bios(ci, ti, num_bios, &len);  		ci->sector += len;  	} while (ci->sector_count -= len); @@ -1298,68 +1396,13 @@ static int __send_write_same(struct clone_info *ci)  }  /* - * Find maximum number of sectors / bvecs we can process with a single bio. - */ -static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx) -{ -	struct bio *bio = ci->bio; -	sector_t bv_len, total_len = 0; - -	for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) { -		bv_len = to_sector(bio->bi_io_vec[*idx].bv_len); - -		if (bv_len > max) -			break; - -		max -= bv_len; -		total_len += bv_len; -	} - -	return total_len; -} - -static int __split_bvec_across_targets(struct clone_info *ci, -				       struct dm_target *ti, sector_t max) -{ -	struct bio *bio = ci->bio; -	struct bio_vec *bv = bio->bi_io_vec + ci->idx; -	sector_t remaining = to_sector(bv->bv_len); -	unsigned offset = 0; -	sector_t len; - -	do { -		if (offset) { -			ti = dm_table_find_target(ci->map, ci->sector); -			if (!dm_target_is_valid(ti)) -				return -EIO; - -			max = max_io_len(ci->sector, ti); -		} - -		len = min(remaining, max); - -		__clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0, -					 bv->bv_offset + offset, len, 1); - -		ci->sector += len; -		ci->sector_count -= len; -		offset += to_bytes(len); -	} while (remaining -= len); - -	ci->idx++; - -	return 0; -} - -/*   * Select the correct strategy for processing a non-flush bio.   */  static int __split_and_process_non_flush(struct clone_info *ci)  {  	struct bio *bio = ci->bio;  	struct dm_target *ti; -	sector_t len, max; -	int idx; +	unsigned len;  	if (unlikely(bio->bi_rw & REQ_DISCARD))  		return __send_discard(ci); @@ -1370,41 +1413,14 @@ static int __split_and_process_non_flush(struct clone_info *ci)  	if (!dm_target_is_valid(ti))  		return -EIO; -	max = max_io_len(ci->sector, ti); +	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); -	/* -	 * Optimise for the simple case where we can do all of -	 * the remaining io with a single clone. -	 */ -	if (ci->sector_count <= max) { -		__clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs, -					 ci->idx, bio->bi_vcnt - ci->idx, 0, -					 ci->sector_count, 0); -		ci->sector_count = 0; -		return 0; -	} +	__clone_and_map_data_bio(ci, ti, ci->sector, &len); -	/* -	 * There are some bvecs that don't span targets. -	 * Do as many of these as possible. -	 */ -	if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) { -		len = __len_within_target(ci, max, &idx); - -		__clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs, -					 ci->idx, idx - ci->idx, 0, len, 0); - -		ci->sector += len; -		ci->sector_count -= len; -		ci->idx = idx; +	ci->sector += len; +	ci->sector_count -= len; -		return 0; -	} - -	/* -	 * Handle a bvec that must be split between two or more targets. 
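[editor's note] With the bvec-splitting paths removed, __split_and_process_non_flush() above reduces to "clamp the length to the target boundary, clone, advance". A standalone sketch of that advance loop over a hypothetical table of equal-sized targets (illustrative numbers only):

#include <stdio.h>

/* Hypothetical table: each target covers 100 sectors. */
static unsigned long long max_io_len(unsigned long long sector)
{
	return 100 - (sector % 100);	/* sectors left before the next target boundary */
}

static void split_and_process(unsigned long long sector, unsigned long long count)
{
	while (count) {
		unsigned long long len = max_io_len(sector);

		if (len > count)
			len = count;
		printf("clone: sector %llu, len %llu\n", sector, len);
		sector += len;
		count -= len;
	}
}

int main(void)
{
	/* A 250-sector bio starting at sector 80 becomes 20 + 100 + 100 + 30 sector clones. */
	split_and_process(80, 250);
	return 0;
}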
-	 */ -	return __split_bvec_across_targets(ci, ti, max); +	return 0;  }  /* @@ -1429,8 +1445,7 @@ static void __split_and_process_bio(struct mapped_device *md,  	ci.io->bio = bio;  	ci.io->md = md;  	spin_lock_init(&ci.io->endio_lock); -	ci.sector = bio->bi_sector; -	ci.idx = bio->bi_idx; +	ci.sector = bio->bi_iter.bi_sector;  	start_io_acct(ci.io); @@ -1494,7 +1509,6 @@ static int dm_merge_bvec(struct request_queue *q,  	 * just one page.  	 */  	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) -  		max_size = 0;  out: @@ -1582,7 +1596,6 @@ static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,  	info->orig = bio_orig;  	info->tio = tio;  	bio->bi_end_io = end_clone_bio; -	bio->bi_private = info;  	return 0;  } @@ -1600,7 +1613,6 @@ static int setup_clone(struct request *clone, struct request *rq,  	clone->cmd = rq->cmd;  	clone->cmd_len = rq->cmd_len;  	clone->sense = rq->sense; -	clone->buffer = rq->buffer;  	clone->end_io = end_clone_request;  	clone->end_io_data = tio; @@ -1960,6 +1972,7 @@ static struct mapped_device *alloc_dev(int minor)  	init_waitqueue_head(&md->wait);  	INIT_WORK(&md->work, dm_wq_work);  	init_waitqueue_head(&md->eventq); +	init_completion(&md->kobj_holder.completion);  	md->disk->major = _major;  	md->disk->first_minor = minor; @@ -2223,7 +2236,7 @@ static struct dm_table *__unbind(struct mapped_device *md)  		return NULL;  	dm_table_event_callback(map, NULL, NULL); -	rcu_assign_pointer(md->map, NULL); +	RCU_INIT_POINTER(md->map, NULL);  	dm_sync_table(md);  	return map; @@ -2278,6 +2291,17 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md)  }  /* + * The queue_limits are only valid as long as you have a reference + * count on 'md'. + */ +struct queue_limits *dm_get_queue_limits(struct mapped_device *md) +{ +	BUG_ON(!atomic_read(&md->holders)); +	return &md->queue->limits; +} +EXPORT_SYMBOL_GPL(dm_get_queue_limits); + +/*   * Fully initialize a request-based queue (->elevator, ->request_fn, etc).   
*/  static int dm_init_request_based_queue(struct mapped_device *md) @@ -2491,7 +2515,7 @@ static void dm_wq_work(struct work_struct *work)  static void dm_queue_flush(struct mapped_device *md)  {  	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); -	smp_mb__after_clear_bit(); +	smp_mb__after_atomic();  	queue_work(md->wq, &md->work);  } @@ -2810,20 +2834,14 @@ struct gendisk *dm_disk(struct mapped_device *md)  struct kobject *dm_kobject(struct mapped_device *md)  { -	return &md->kobj; +	return &md->kobj_holder.kobj;  } -/* - * struct mapped_device should not be exported outside of dm.c - * so use this check to verify that kobj is part of md structure - */  struct mapped_device *dm_get_from_kobject(struct kobject *kobj)  {  	struct mapped_device *md; -	md = container_of(kobj, struct mapped_device, kobj); -	if (&md->kobj != kobj) -		return NULL; +	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);  	if (test_bit(DMF_FREEING, &md->flags) ||  	    dm_deleting_md(md)) @@ -2838,6 +2856,11 @@ int dm_suspended_md(struct mapped_device *md)  	return test_bit(DMF_SUSPENDED, &md->flags);  } +int dm_test_deferred_remove_flag(struct mapped_device *md) +{ +	return test_bit(DMF_DEFERRED_REMOVE, &md->flags); +} +  int dm_suspended(struct dm_target *ti)  {  	return dm_suspended_md(dm_table_get_md(ti->table)); @@ -2862,18 +2885,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u  	if (type == DM_TYPE_BIO_BASED) {  		cachep = _io_cache; -		pool_size = 16; +		pool_size = dm_get_reserved_bio_based_ios();  		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);  	} else if (type == DM_TYPE_REQUEST_BASED) {  		cachep = _rq_tio_cache; -		pool_size = MIN_IOS; +		pool_size = dm_get_reserved_rq_based_ios();  		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);  		/* per_bio_data_size is not used. See __bind_mempools(). 
*/  		WARN_ON(per_bio_data_size != 0);  	} else  		goto out; -	pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep); +	pools->io_pool = mempool_create_slab_pool(pool_size, cachep);  	if (!pools->io_pool)  		goto out; @@ -2914,8 +2937,6 @@ static const struct block_device_operations dm_blk_dops = {  	.owner = THIS_MODULE  }; -EXPORT_SYMBOL(dm_get_mapinfo); -  /*   * module hooks   */ @@ -2924,6 +2945,13 @@ module_exit(dm_exit);  module_param(major, uint, 0);  MODULE_PARM_DESC(major, "The major number of the device mapper"); + +module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); + +module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools"); +  MODULE_DESCRIPTION(DM_NAME " driver");  MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");  MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 5e604cc7b4a..ed76126aac5 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -15,6 +15,8 @@  #include <linux/list.h>  #include <linux/blkdev.h>  #include <linux/hdreg.h> +#include <linux/completion.h> +#include <linux/kobject.h>  #include "dm-stats.h" @@ -71,7 +73,6 @@ unsigned dm_table_get_type(struct dm_table *t);  struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);  bool dm_table_request_based(struct dm_table *t);  bool dm_table_supports_discards(struct dm_table *t); -int dm_table_alloc_md_mempools(struct dm_table *t);  void dm_table_free_md_mempools(struct dm_table *t);  struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); @@ -129,6 +130,16 @@ int dm_deleting_md(struct mapped_device *md);  int dm_suspended_md(struct mapped_device *md);  /* + * Test if the device is scheduled for deferred remove. + */ +int dm_test_deferred_remove_flag(struct mapped_device *md); + +/* + * Try to remove devices marked for deferred removal. + */ +void dm_deferred_remove(void); + +/*   * The device-mapper can be driven through one of two interfaces;   * ioctl or filesystem, depending which patch you have applied.   
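The mempool hunks above stop hard-coding MIN_IOS and instead size the bio-based and request-based pools from the two new writable module parameters. One plausible shape for the accessor declared in dm.h (the default and ceiling values here are assumptions for illustration; the real dm.c may clamp differently):

#include <linux/module.h>

#define RESERVED_IOS_DEFAULT	16	/* assumed default */
#define RESERVED_IOS_MAX	1024	/* assumed sanity ceiling */

static unsigned reserved_bio_based_ios = RESERVED_IOS_DEFAULT;
module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

/* Snapshot the tunable once and clamp it, so a concurrent sysfs write
 * cannot hand the mempool code zero or an absurdly large minimum.
 */
unsigned dm_get_reserved_bio_based_ios(void)
{
	unsigned ios = ACCESS_ONCE(reserved_bio_based_ios);

	if (!ios)
		ios = RESERVED_IOS_DEFAULT;
	else if (ios > RESERVED_IOS_MAX)
		ios = RESERVED_IOS_MAX;

	return ios;
}

mempool_create_slab_pool() treats the returned value as the minimum number of pre-allocated elements, so the pools keep making forward progress under memory pressure even after the parameter is tuned at runtime.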
*/ @@ -138,12 +149,27 @@ void dm_interface_exit(void);  /*   * sysfs interface   */ +struct dm_kobject_holder { +	struct kobject kobj; +	struct completion completion; +}; + +static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj) +{ +	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion; +} +  int dm_sysfs_init(struct mapped_device *md);  void dm_sysfs_exit(struct mapped_device *md);  struct kobject *dm_kobject(struct mapped_device *md);  struct mapped_device *dm_get_from_kobject(struct kobject *kobj);  /* + * The kobject helper + */ +void dm_kobject_release(struct kobject *kobj); + +/*   * Targets for linear and striped mappings   */  int dm_linear_init(void); @@ -158,9 +184,11 @@ void dm_stripe_exit(void);  void dm_destroy(struct mapped_device *md);  void dm_destroy_immediate(struct mapped_device *md);  int dm_open_count(struct mapped_device *md); -int dm_lock_for_deletion(struct mapped_device *md); +int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred); +int dm_cancel_deferred_remove(struct mapped_device *md);  int dm_request_based(struct mapped_device *md);  sector_t dm_get_size(struct mapped_device *md); +struct request_queue *dm_get_md_queue(struct mapped_device *md);  struct dm_stats *dm_get_stats(struct mapped_device *md);  int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, @@ -184,6 +212,9 @@ void dm_free_md_mempools(struct dm_md_mempools *pools);  /*   * Helpers that are used by DM core   */ +unsigned dm_get_reserved_bio_based_ios(void); +unsigned dm_get_reserved_rq_based_ios(void); +  static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)  {  	return !maxlen || strlen(result) + 1 >= maxlen; diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index 3193aefe982..e8b4574956c 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c @@ -74,8 +74,8 @@ static void faulty_fail(struct bio *bio, int error)  {  	struct bio *b = bio->bi_private; -	b->bi_size = bio->bi_size; -	b->bi_sector = bio->bi_sector; +	b->bi_iter.bi_size = bio->bi_iter.bi_size; +	b->bi_iter.bi_sector = bio->bi_iter.bi_sector;  	bio_put(bio); @@ -185,26 +185,31 @@ static void make_request(struct mddev *mddev, struct bio *bio)  			return;  		} -		if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE)) +		if (check_sector(conf, bio->bi_iter.bi_sector, +				 bio_end_sector(bio), WRITE))  			failit = 1;  		if (check_mode(conf, WritePersistent)) { -			add_sector(conf, bio->bi_sector, WritePersistent); +			add_sector(conf, bio->bi_iter.bi_sector, +				   WritePersistent);  			failit = 1;  		}  		if (check_mode(conf, WriteTransient))  			failit = 1;  	} else {  		/* read request */ -		if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ)) +		if (check_sector(conf, bio->bi_iter.bi_sector, +				 bio_end_sector(bio), READ))  			failit = 1;  		if (check_mode(conf, ReadTransient))  			failit = 1;  		if (check_mode(conf, ReadPersistent)) { -			add_sector(conf, bio->bi_sector, ReadPersistent); +			add_sector(conf, bio->bi_iter.bi_sector, +				   ReadPersistent);  			failit = 1;  		}  		if (check_mode(conf, ReadFixable)) { -			add_sector(conf, bio->bi_sector, ReadFixable); +			add_sector(conf, bio->bi_iter.bi_sector, +				   ReadFixable);  			failit = 1;  		}  	} diff --git a/drivers/md/linear.c b/drivers/md/linear.c index f03fabd2b37..56f534b4a2d 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -288,65 +288,65 @@ static int linear_stop (struct mddev *mddev) 
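The dm_kobject_holder added to dm.h above embeds a struct completion next to the kobject (alloc_dev() now runs init_completion() on it), so the owner can drop its kobject reference and then block until the final sysfs reference is gone before freeing the surrounding mapped_device. A sketch of how the declared dm_kobject_release() and a teardown path plausibly fit together (the ktype and teardown function below are illustrative):

#include <linux/kobject.h>
#include <linux/completion.h>

/* ktype release handler: called when the kobject refcount hits zero */
void dm_kobject_release(struct kobject *kobj)
{
	complete(dm_get_completion_from_kobject(kobj));
}

static struct kobj_type example_ktype = {
	.release = dm_kobject_release,
	/* sysfs_ops and default attributes omitted in this sketch */
};

/* Illustrative teardown: drop our reference, then wait until any other
 * holder (for example an open sysfs file) has dropped theirs as well.
 */
static void example_teardown(struct dm_kobject_holder *holder)
{
	struct completion *c = &holder->completion;

	kobject_put(&holder->kobj);
	wait_for_completion(c);
	/* only now is it safe to free the structure embedding 'holder' */
}

The same change lets dm_get_from_kobject() drop its old pointer-comparison sanity check: container_of() against kobj_holder.kobj is valid by construction.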
 static void linear_make_request(struct mddev *mddev, struct bio *bio)  { +	char b[BDEVNAME_SIZE];  	struct dev_info *tmp_dev; -	sector_t start_sector; +	struct bio *split; +	sector_t start_sector, end_sector, data_offset;  	if (unlikely(bio->bi_rw & REQ_FLUSH)) {  		md_flush_request(mddev, bio);  		return;  	} -	rcu_read_lock(); -	tmp_dev = which_dev(mddev, bio->bi_sector); -	start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; - - -	if (unlikely(bio->bi_sector >= (tmp_dev->end_sector) -		     || (bio->bi_sector < start_sector))) { -		char b[BDEVNAME_SIZE]; - -		printk(KERN_ERR -		       "md/linear:%s: make_request: Sector %llu out of bounds on " -		       "dev %s: %llu sectors, offset %llu\n", -		       mdname(mddev), -		       (unsigned long long)bio->bi_sector, -		       bdevname(tmp_dev->rdev->bdev, b), -		       (unsigned long long)tmp_dev->rdev->sectors, -		       (unsigned long long)start_sector); -		rcu_read_unlock(); -		bio_io_error(bio); -		return; -	} -	if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) { -		/* This bio crosses a device boundary, so we have to -		 * split it. -		 */ -		struct bio_pair *bp; -		sector_t end_sector = tmp_dev->end_sector; +	do { +		rcu_read_lock(); -		rcu_read_unlock(); - -		bp = bio_split(bio, end_sector - bio->bi_sector); +		tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); +		start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; +		end_sector = tmp_dev->end_sector; +		data_offset = tmp_dev->rdev->data_offset; +		bio->bi_bdev = tmp_dev->rdev->bdev; -		linear_make_request(mddev, &bp->bio1); -		linear_make_request(mddev, &bp->bio2); -		bio_pair_release(bp); -		return; -	} -		     -	bio->bi_bdev = tmp_dev->rdev->bdev; -	bio->bi_sector = bio->bi_sector - start_sector -		+ tmp_dev->rdev->data_offset; -	rcu_read_unlock(); +		rcu_read_unlock(); -	if (unlikely((bio->bi_rw & REQ_DISCARD) && -		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { -		/* Just ignore it */ -		bio_endio(bio, 0); -		return; -	} +		if (unlikely(bio->bi_iter.bi_sector >= end_sector || +			     bio->bi_iter.bi_sector < start_sector)) +			goto out_of_bounds; + +		if (unlikely(bio_end_sector(bio) > end_sector)) { +			/* This bio crosses a device boundary, so we have to +			 * split it. 
+			 */ +			split = bio_split(bio, end_sector - +					  bio->bi_iter.bi_sector, +					  GFP_NOIO, fs_bio_set); +			bio_chain(split, bio); +		} else { +			split = bio; +		} -	generic_make_request(bio); +		split->bi_iter.bi_sector = split->bi_iter.bi_sector - +			start_sector + data_offset; + +		if (unlikely((split->bi_rw & REQ_DISCARD) && +			 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { +			/* Just ignore it */ +			bio_endio(split, 0); +		} else +			generic_make_request(split); +	} while (split != bio); +	return; + +out_of_bounds: +	printk(KERN_ERR +	       "md/linear:%s: make_request: Sector %llu out of bounds on " +	       "dev %s: %llu sectors, offset %llu\n", +	       mdname(mddev), +	       (unsigned long long)bio->bi_iter.bi_sector, +	       bdevname(tmp_dev->rdev->bdev, b), +	       (unsigned long long)tmp_dev->rdev->sectors, +	       (unsigned long long)start_sector); +	bio_io_error(bio);  }  static void linear_status (struct seq_file *seq, struct mddev *mddev) diff --git a/drivers/md/md.c b/drivers/md/md.c index adf4d7e1d5e..32fc19c540d 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -112,7 +112,7 @@ static inline int speed_max(struct mddev *mddev)  static struct ctl_table_header *raid_table_header; -static ctl_table raid_table[] = { +static struct ctl_table raid_table[] = {  	{  		.procname	= "speed_limit_min",  		.data		= &sysctl_speed_limit_min, @@ -130,7 +130,7 @@ static ctl_table raid_table[] = {  	{ }  }; -static ctl_table raid_dir_table[] = { +static struct ctl_table raid_dir_table[] = {  	{  		.procname	= "raid",  		.maxlen		= 0, @@ -140,7 +140,7 @@ static ctl_table raid_dir_table[] = {  	{ }  }; -static ctl_table raid_root_table[] = { +static struct ctl_table raid_root_table[] = {  	{  		.procname	= "dev",  		.maxlen		= 0, @@ -183,46 +183,6 @@ struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,  }  EXPORT_SYMBOL_GPL(bio_clone_mddev); -void md_trim_bio(struct bio *bio, int offset, int size) -{ -	/* 'bio' is a cloned bio which we need to trim to match -	 * the given offset and size. 
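The rewritten linear_make_request() above no longer recurses through bio_pair; it slices the front of the bio off with bio_split(), folds the fragment's completion into the parent with bio_chain(), and loops until the remainder fits entirely inside one component device. A stripped-down sketch of that pattern (sectors_to_boundary() is an illustrative stand-in for the which_dev() arithmetic):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative stub: sectors left before the next device boundary. */
static sector_t sectors_to_boundary(struct bio *bio);

static void split_and_submit(struct bio *bio)
{
	struct bio *split;

	do {
		sector_t max = sectors_to_boundary(bio);

		if (bio_sectors(bio) > max) {
			/* front part becomes its own bio ... */
			split = bio_split(bio, max, GFP_NOIO, fs_bio_set);
			/* ... whose completion is chained to the parent */
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		/* remap split->bi_iter.bi_sector / bi_bdev here */
		generic_make_request(split);
	} while (split != bio);
}

bio_split() advances the parent's bi_iter past the carved-off range, so the loop naturally terminates once the tail no longer crosses a boundary.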
-	 * This requires adjusting bi_sector, bi_size, and bi_io_vec -	 */ -	int i; -	struct bio_vec *bvec; -	int sofar = 0; - -	size <<= 9; -	if (offset == 0 && size == bio->bi_size) -		return; - -	clear_bit(BIO_SEG_VALID, &bio->bi_flags); - -	bio_advance(bio, offset << 9); - -	bio->bi_size = size; - -	/* avoid any complications with bi_idx being non-zero*/ -	if (bio->bi_idx) { -		memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx, -			(bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec)); -		bio->bi_vcnt -= bio->bi_idx; -		bio->bi_idx = 0; -	} -	/* Make sure vcnt and last bv are not too big */ -	bio_for_each_segment(bvec, bio, i) { -		if (sofar + bvec->bv_len > size) -			bvec->bv_len = size - sofar; -		if (bvec->bv_len == 0) { -			bio->bi_vcnt = i; -			break; -		} -		sofar += bvec->bv_len; -	} -} -EXPORT_SYMBOL_GPL(md_trim_bio); -  /*   * We have a system wide 'event count' that is incremented   * on any 'interesting' event, and readers of /proc/mdstat @@ -433,7 +393,7 @@ static void md_submit_flush_data(struct work_struct *ws)  	struct mddev *mddev = container_of(ws, struct mddev, flush_work);  	struct bio *bio = mddev->flush_bio; -	if (bio->bi_size == 0) +	if (bio->bi_iter.bi_size == 0)  		/* an empty barrier - all done */  		bio_endio(bio, 0);  	else { @@ -602,11 +562,19 @@ static struct mddev * mddev_find(dev_t unit)  	goto retry;  } -static inline int mddev_lock(struct mddev * mddev) +static inline int __must_check mddev_lock(struct mddev * mddev)  {  	return mutex_lock_interruptible(&mddev->reconfig_mutex);  } +/* Sometimes we need to take the lock in a situation where + * failure due to interrupts is not acceptable. + */ +static inline void mddev_lock_nointr(struct mddev * mddev) +{ +	mutex_lock(&mddev->reconfig_mutex); +} +  static inline int mddev_is_locked(struct mddev *mddev)  {  	return mutex_is_locked(&mddev->reconfig_mutex); @@ -786,7 +754,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,  	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);  	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; -	bio->bi_sector = sector; +	bio->bi_iter.bi_sector = sector;  	bio_add_page(bio, page, size, 0);  	bio->bi_private = rdev;  	bio->bi_end_io = super_written; @@ -808,36 +776,24 @@ void md_super_wait(struct mddev *mddev)  	finish_wait(&mddev->sb_wait, &wq);  } -static void bi_complete(struct bio *bio, int error) -{ -	complete((struct completion*)bio->bi_private); -} -  int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,  		 struct page *page, int rw, bool metadata_op)  {  	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); -	struct completion event;  	int ret; -	rw |= REQ_SYNC; -  	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?  		
rdev->meta_bdev : rdev->bdev;  	if (metadata_op) -		bio->bi_sector = sector + rdev->sb_start; +		bio->bi_iter.bi_sector = sector + rdev->sb_start;  	else if (rdev->mddev->reshape_position != MaxSector &&  		 (rdev->mddev->reshape_backwards ==  		  (sector >= rdev->mddev->reshape_position))) -		bio->bi_sector = sector + rdev->new_data_offset; +		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;  	else -		bio->bi_sector = sector + rdev->data_offset; +		bio->bi_iter.bi_sector = sector + rdev->data_offset;  	bio_add_page(bio, page, size, 0); -	init_completion(&event); -	bio->bi_private = &event; -	bio->bi_end_io = bi_complete; -	submit_bio(rw, bio); -	wait_for_completion(&event); +	submit_bio_wait(rw, bio);  	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);  	bio_put(bio); @@ -1119,6 +1075,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)  	rdev->raid_disk = -1;  	clear_bit(Faulty, &rdev->flags);  	clear_bit(In_sync, &rdev->flags); +	clear_bit(Bitmap_sync, &rdev->flags);  	clear_bit(WriteMostly, &rdev->flags);  	if (mddev->raid_disks == 0) { @@ -1197,6 +1154,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)  		 */  		if (ev1 < mddev->bitmap->events_cleared)  			return 0; +		if (ev1 < mddev->events) +			set_bit(Bitmap_sync, &rdev->flags);  	} else {  		if (ev1 < mddev->events)  			/* just a hot-add of a new device, leave raid_disk at -1 */ @@ -1212,6 +1171,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)  			    desc->raid_disk < mddev->raid_disks */) {  			set_bit(In_sync, &rdev->flags);  			rdev->raid_disk = desc->raid_disk; +			rdev->saved_raid_disk = desc->raid_disk;  		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {  			/* active but not in sync implies recovery up to  			 * reshape position.  
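sync_page_io() above drops its private bi_complete()/wait_for_completion() plumbing in favour of submit_bio_wait(), which supplies its own end_io handler and blocks until the bio finishes. A minimal synchronous read using the same era's interfaces (the helper itself is illustrative):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Read 'size' bytes at 'sector' from 'bdev' into 'page', synchronously.
 * Returns true on success.
 */
static bool read_page_sync(struct block_device *bdev, sector_t sector,
			   struct page *page, int size)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	bool ok;

	if (!bio)
		return false;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, size, 0);

	submit_bio_wait(READ, bio);	/* blocks until completion */

	ok = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ok;
}

submit_bio_wait() also marks the bio synchronous itself, which is presumably why the explicit rw |= REQ_SYNC line disappears as well.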
We don't know exactly where @@ -1605,6 +1565,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)  	rdev->raid_disk = -1;  	clear_bit(Faulty, &rdev->flags);  	clear_bit(In_sync, &rdev->flags); +	clear_bit(Bitmap_sync, &rdev->flags);  	clear_bit(WriteMostly, &rdev->flags);  	if (mddev->raid_disks == 0) { @@ -1687,6 +1648,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)  		 */  		if (ev1 < mddev->bitmap->events_cleared)  			return 0; +		if (ev1 < mddev->events) +			set_bit(Bitmap_sync, &rdev->flags);  	} else {  		if (ev1 < mddev->events)  			/* just a hot-add of a new device, leave raid_disk at -1 */ @@ -1707,10 +1670,14 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)  			set_bit(Faulty, &rdev->flags);  			break;  		default: +			rdev->saved_raid_disk = role;  			if ((le32_to_cpu(sb->feature_map) & -			     MD_FEATURE_RECOVERY_OFFSET)) +			     MD_FEATURE_RECOVERY_OFFSET)) {  				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); -			else +				if (!(le32_to_cpu(sb->feature_map) & +				      MD_FEATURE_RECOVERY_BITMAP)) +					rdev->saved_raid_disk = -1; +			} else  				set_bit(In_sync, &rdev->flags);  			rdev->raid_disk = role;  			break; @@ -1772,6 +1739,9 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)  			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);  		sb->recovery_offset =  			cpu_to_le64(rdev->recovery_offset); +		if (rdev->saved_raid_disk >= 0 && mddev->bitmap) +			sb->feature_map |= +				cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);  	}  	if (test_bit(Replacement, &rdev->flags))  		sb->feature_map |= @@ -2513,8 +2483,7 @@ repeat:  		if (rdev->sb_loaded != 1)  			continue; /* no noise on spare devices */ -		if (!test_bit(Faulty, &rdev->flags) && -		    rdev->saved_raid_disk == -1) { +		if (!test_bit(Faulty, &rdev->flags)) {  			md_super_write(mddev,rdev,  				       rdev->sb_start, rdev->sb_size,  				       rdev->sb_page); @@ -2530,11 +2499,9 @@ repeat:  				rdev->badblocks.size = 0;  			} -		} else if (test_bit(Faulty, &rdev->flags)) +		} else  			pr_debug("md: %s (skipping faulty)\n",  				 bdevname(rdev->bdev, b)); -		else -			pr_debug("(skipping incremental s/r ");  		if (mddev->level == LEVEL_MULTIPATH)  			/* only need to write one superblock... 
*/ @@ -2650,6 +2617,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)  	 *  blocked - sets the Blocked flags  	 *  -blocked - clears the Blocked and possibly simulates an error  	 *  insync - sets Insync providing device isn't active +	 *  -insync - clear Insync for a device with a slot assigned, +	 *            so that it gets rebuilt based on bitmap  	 *  write_error - sets WriteErrorSeen  	 *  -write_error - clears WriteErrorSeen  	 */ @@ -2698,6 +2667,11 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)  	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {  		set_bit(In_sync, &rdev->flags);  		err = 0; +	} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) { +		clear_bit(In_sync, &rdev->flags); +		rdev->saved_raid_disk = rdev->raid_disk; +		rdev->raid_disk = -1; +		err = 0;  	} else if (cmd_match(buf, "write_error")) {  		set_bit(WriteErrorSeen, &rdev->flags);  		err = 0; @@ -2830,6 +2804,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)  		else  			rdev->saved_raid_disk = -1;  		clear_bit(In_sync, &rdev->flags); +		clear_bit(Bitmap_sync, &rdev->flags);  		err = rdev->mddev->pers->  			hot_add_disk(rdev->mddev, rdev);  		if (err) { @@ -3018,7 +2993,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)  		for_each_mddev(mddev, tmp) {  			struct md_rdev *rdev2; -			mddev_lock(mddev); +			mddev_lock_nointr(mddev);  			rdev_for_each(rdev2, mddev)  				if (rdev->bdev == rdev2->bdev &&  				    rdev != rdev2 && @@ -3034,7 +3009,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)  				break;  			}  		} -		mddev_lock(my_mddev); +		mddev_lock_nointr(my_mddev);  		if (overlap) {  			/* Someone else could have slipped in a size  			 * change here, but doing so is just silly. @@ -3473,6 +3448,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)  		mddev->level = LEVEL_NONE;  		return rv;  	} +	if (mddev->ro) +		return  -EROFS;  	/* request to change the personality.  
Need to ensure:  	 *  - array is not engaged in resync/recovery/reshape @@ -3555,7 +3532,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)  			printk(KERN_WARNING  			       "md: cannot register extra attributes for %s\n",  			       mdname(mddev)); -		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action"); +		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");  	}		  	if (mddev->pers->sync_request != NULL &&  	    pers->sync_request == NULL) { @@ -3620,9 +3597,12 @@ level_store(struct mddev *mddev, const char *buf, size_t len)  		mddev->in_sync = 1;  		del_timer_sync(&mddev->safemode_timer);  	} +	blk_set_stacking_limits(&mddev->queue->limits);  	pers->run(mddev);  	set_bit(MD_CHANGE_DEVS, &mddev->flags);  	mddev_resume(mddev); +	if (!mddev->thread) +		md_update_sb(mddev, 1);  	sysfs_notify(&mddev->kobj, NULL, "level");  	md_new_event(mddev);  	return rv; @@ -3656,6 +3636,8 @@ layout_store(struct mddev *mddev, const char *buf, size_t len)  		int err;  		if (mddev->pers->check_reshape == NULL)  			return -EBUSY; +		if (mddev->ro) +			return -EROFS;  		mddev->new_layout = n;  		err = mddev->pers->check_reshape(mddev);  		if (err) { @@ -3745,6 +3727,8 @@ chunk_size_store(struct mddev *mddev, const char *buf, size_t len)  		int err;  		if (mddev->pers->check_reshape == NULL)  			return -EBUSY; +		if (mddev->ro) +			return -EROFS;  		mddev->new_chunk_sectors = n >> 9;  		err = mddev->pers->check_reshape(mddev);  		if (err) { @@ -5203,32 +5187,6 @@ static int restart_array(struct mddev *mddev)  	return 0;  } -/* similar to deny_write_access, but accounts for our holding a reference - * to the file ourselves */ -static int deny_bitmap_write_access(struct file * file) -{ -	struct inode *inode = file->f_mapping->host; - -	spin_lock(&inode->i_lock); -	if (atomic_read(&inode->i_writecount) > 1) { -		spin_unlock(&inode->i_lock); -		return -ETXTBSY; -	} -	atomic_set(&inode->i_writecount, -1); -	spin_unlock(&inode->i_lock); - -	return 0; -} - -void restore_bitmap_write_access(struct file *file) -{ -	struct inode *inode = file->f_mapping->host; - -	spin_lock(&inode->i_lock); -	atomic_set(&inode->i_writecount, 1); -	spin_unlock(&inode->i_lock); -} -  static void md_clean(struct mddev *mddev)  {  	mddev->array_sectors = 0; @@ -5298,7 +5256,7 @@ static void __md_stop_writes(struct mddev *mddev)  void md_stop_writes(struct mddev *mddev)  { -	mddev_lock(mddev); +	mddev_lock_nointr(mddev);  	__md_stop_writes(mddev);  	mddev_unlock(mddev);  } @@ -5331,20 +5289,35 @@ EXPORT_SYMBOL_GPL(md_stop);  static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)  {  	int err = 0; +	int did_freeze = 0; + +	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { +		did_freeze = 1; +		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); +		md_wakeup_thread(mddev->thread); +	} +	if (mddev->sync_thread) { +		set_bit(MD_RECOVERY_INTR, &mddev->recovery); +		/* Thread might be blocked waiting for metadata update +		 * which will now never happen */ +		wake_up_process(mddev->sync_thread->tsk); +	} +	mddev_unlock(mddev); +	wait_event(resync_wait, mddev->sync_thread == NULL); +	mddev_lock_nointr(mddev); +  	mutex_lock(&mddev->open_mutex); -	if (atomic_read(&mddev->openers) > !!bdev) { +	if (atomic_read(&mddev->openers) > !!bdev || +	    mddev->sync_thread || +	    (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {  		printk("md: %s still in use.\n",mdname(mddev)); +		if (did_freeze) { +			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); +			
md_wakeup_thread(mddev->thread); +		}  		err = -EBUSY;  		goto out;  	} -	if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) { -		/* Someone opened the device since we flushed it -		 * so page cache could be dirty and it is too late -		 * to flush.  So abort -		 */ -		mutex_unlock(&mddev->open_mutex); -		return -EBUSY; -	}  	if (mddev->pers) {  		__md_stop_writes(mddev); @@ -5355,7 +5328,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)  		set_disk_ro(mddev->gendisk, 1);  		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);  		sysfs_notify_dirent_safe(mddev->sysfs_state); -		err = 0;	 +		err = 0;  	}  out:  	mutex_unlock(&mddev->open_mutex); @@ -5371,20 +5344,34 @@ static int do_md_stop(struct mddev * mddev, int mode,  {  	struct gendisk *disk = mddev->gendisk;  	struct md_rdev *rdev; +	int did_freeze = 0; + +	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { +		did_freeze = 1; +		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); +		md_wakeup_thread(mddev->thread); +	} +	if (mddev->sync_thread) { +		set_bit(MD_RECOVERY_INTR, &mddev->recovery); +		/* Thread might be blocked waiting for metadata update +		 * which will now never happen */ +		wake_up_process(mddev->sync_thread->tsk); +	} +	mddev_unlock(mddev); +	wait_event(resync_wait, mddev->sync_thread == NULL); +	mddev_lock_nointr(mddev);  	mutex_lock(&mddev->open_mutex);  	if (atomic_read(&mddev->openers) > !!bdev || -	    mddev->sysfs_active) { +	    mddev->sysfs_active || +	    mddev->sync_thread || +	    (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {  		printk("md: %s still in use.\n",mdname(mddev));  		mutex_unlock(&mddev->open_mutex); -		return -EBUSY; -	} -	if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) { -		/* Someone opened the device since we flushed it -		 * so page cache could be dirty and it is too late -		 * to flush.  So abort -		 */ -		mutex_unlock(&mddev->open_mutex); +		if (did_freeze) { +			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); +			md_wakeup_thread(mddev->thread); +		}  		return -EBUSY;  	}  	if (mddev->pers) { @@ -5420,7 +5407,6 @@ static int do_md_stop(struct mddev * mddev, int mode,  		bitmap_destroy(mddev);  		if (mddev->bitmap_info.file) { -			restore_bitmap_write_access(mddev->bitmap_info.file);  			fput(mddev->bitmap_info.file);  			mddev->bitmap_info.file = NULL;  		} @@ -5613,7 +5599,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg)  	if (mddev->in_sync)  		info.state = (1<<MD_SB_CLEAN);  	if (mddev->bitmap && mddev->bitmap_info.offset) -		info.state = (1<<MD_SB_BITMAP_PRESENT); +		info.state |= (1<<MD_SB_BITMAP_PRESENT);  	info.active_disks  = insync;  	info.working_disks = working;  	info.failed_disks  = failed; @@ -5772,8 +5758,10 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)  			    info->raid_disk < mddev->raid_disks) {  				rdev->raid_disk = info->raid_disk;  				set_bit(In_sync, &rdev->flags); +				clear_bit(Bitmap_sync, &rdev->flags);  			} else  				rdev->raid_disk = -1; +			rdev->saved_raid_disk = rdev->raid_disk;  		} else  			super_types[mddev->major_version].  				
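Condensed from the md_set_readonly() and do_md_stop() hunks above, this is the ordering both stop paths now follow before they so much as look at open_mutex; every statement below appears in the diff, the comments just spell out why:

	int did_freeze = 0;

	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
		did_freeze = 1;		/* undo on the -EBUSY path */
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
	if (mddev->sync_thread) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		/* it might be asleep waiting for a metadata update
		 * that will now never happen */
		wake_up_process(mddev->sync_thread->tsk);
	}

	mddev_unlock(mddev);	/* let md_check_recovery() reap the thread */
	wait_event(resync_wait, mddev->sync_thread == NULL);
	mddev_lock_nointr(mddev);	/* re-take; must not fail with -EINTR */

md_reap_sync_thread(), further down in this patch, gains the matching wake_up(&resync_wait) that terminates the wait_event() here.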
validate_super(mddev, rdev); @@ -5786,11 +5774,6 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)  			return -EINVAL;  		} -		if (test_bit(In_sync, &rdev->flags)) -			rdev->saved_raid_disk = rdev->raid_disk; -		else -			rdev->saved_raid_disk = -1; -  		clear_bit(In_sync, &rdev->flags); /* just to be sure */  		if (info->state & (1<<MD_DISK_WRITEMOSTLY))  			set_bit(WriteMostly, &rdev->flags); @@ -5975,7 +5958,7 @@ abort_export:  static int set_bitmap_file(struct mddev *mddev, int fd)  { -	int err; +	int err = 0;  	if (mddev->pers) {  		if (!mddev->pers->quiesce) @@ -5987,6 +5970,7 @@ static int set_bitmap_file(struct mddev *mddev, int fd)  	if (fd >= 0) { +		struct inode *inode;  		if (mddev->bitmap)  			return -EEXIST; /* cannot add when bitmap is present */  		mddev->bitmap_info.file = fget(fd); @@ -5997,10 +5981,21 @@ static int set_bitmap_file(struct mddev *mddev, int fd)  			return -EBADF;  		} -		err = deny_bitmap_write_access(mddev->bitmap_info.file); -		if (err) { +		inode = mddev->bitmap_info.file->f_mapping->host; +		if (!S_ISREG(inode->i_mode)) { +			printk(KERN_ERR "%s: error: bitmap file must be a regular file\n", +			       mdname(mddev)); +			err = -EBADF; +		} else if (!(mddev->bitmap_info.file->f_mode & FMODE_WRITE)) { +			printk(KERN_ERR "%s: error: bitmap file must open for write\n", +			       mdname(mddev)); +			err = -EBADF; +		} else if (atomic_read(&inode->i_writecount) != 1) {  			printk(KERN_ERR "%s: error: bitmap file is already in use\n",  			       mdname(mddev)); +			err = -EBUSY; +		} +		if (err) {  			fput(mddev->bitmap_info.file);  			mddev->bitmap_info.file = NULL;  			return err; @@ -6023,10 +6018,8 @@ static int set_bitmap_file(struct mddev *mddev, int fd)  		mddev->pers->quiesce(mddev, 0);  	}  	if (fd < 0) { -		if (mddev->bitmap_info.file) { -			restore_bitmap_write_access(mddev->bitmap_info.file); +		if (mddev->bitmap_info.file)  			fput(mddev->bitmap_info.file); -		}  		mddev->bitmap_info.file = NULL;  	} @@ -6148,6 +6141,8 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)  	 */  	if (mddev->sync_thread)  		return -EBUSY; +	if (mddev->ro) +		return -EROFS;  	rdev_for_each(rdev, mddev) {  		sector_t avail = rdev->sectors; @@ -6170,6 +6165,8 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)  	/* change the number of raid disks */  	if (mddev->pers->check_reshape == NULL)  		return -EINVAL; +	if (mddev->ro) +		return -EROFS;  	if (raid_disks <= 0 ||  	    (mddev->max_disks && raid_disks >= mddev->max_disks))  		return -EINVAL; @@ -6340,6 +6337,32 @@ static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)  	return 0;  } +static inline bool md_ioctl_valid(unsigned int cmd) +{ +	switch (cmd) { +	case ADD_NEW_DISK: +	case BLKROSET: +	case GET_ARRAY_INFO: +	case GET_BITMAP_FILE: +	case GET_DISK_INFO: +	case HOT_ADD_DISK: +	case HOT_REMOVE_DISK: +	case PRINT_RAID_DEBUG: +	case RAID_AUTORUN: +	case RAID_VERSION: +	case RESTART_ARRAY_RW: +	case RUN_ARRAY: +	case SET_ARRAY_INFO: +	case SET_BITMAP_FILE: +	case SET_DISK_FAULTY: +	case STOP_ARRAY: +	case STOP_ARRAY_RO: +		return true; +	default: +		return false; +	} +} +  static int md_ioctl(struct block_device *bdev, fmode_t mode,  			unsigned int cmd, unsigned long arg)  { @@ -6348,6 +6371,9 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,  	struct mddev *mddev = NULL;  	int ro; +	if (!md_ioctl_valid(cmd)) +		return -ENOTTY; +  	switch (cmd) {  	case RAID_VERSION:  	case GET_ARRAY_INFO: @@ -6591,7 +6617,7 @@ static int 
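set_bitmap_file() above now open-codes its safety checks instead of calling the removed deny_bitmap_write_access(), which used to force i_writecount negative; the new rule is simply that the caller's descriptor must already be the only writer. Distilled into a helper (the function name is illustrative; md.c performs these checks inline):

#include <linux/fs.h>

static int check_bitmap_file(struct file *f)
{
	struct inode *inode = f->f_mapping->host;

	if (!S_ISREG(inode->i_mode))
		return -EBADF;	/* must be a regular file */
	if (!(f->f_mode & FMODE_WRITE))
		return -EBADF;	/* the bitmap is written, so we need write */
	if (atomic_read(&inode->i_writecount) != 1)
		return -EBUSY;	/* someone else also has it open for write */
	return 0;
}

Because nothing is pinned any more, the teardown paths lose their restore_bitmap_write_access() calls too; a plain fput() is enough.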
md_ioctl(struct block_device *bdev, fmode_t mode,  				wait_event(mddev->sb_wait,  					   !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&  					   !test_bit(MD_CHANGE_PENDING, &mddev->flags)); -				mddev_lock(mddev); +				mddev_lock_nointr(mddev);  			}  		} else {  			err = -EROFS; @@ -7149,11 +7175,14 @@ static int md_seq_open(struct inode *inode, struct file *file)  	return error;  } +static int md_unloading;  static unsigned int mdstat_poll(struct file *filp, poll_table *wait)  {  	struct seq_file *seq = filp->private_data;  	int mask; +	if (md_unloading) +		return POLLIN|POLLRDNORM|POLLERR|POLLPRI;;  	poll_wait(filp, &md_event_waiters, wait);  	/* always allow read */ @@ -7362,8 +7391,10 @@ void md_do_sync(struct md_thread *thread)  	/* just incase thread restarts... */  	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))  		return; -	if (mddev->ro) /* never try to sync a read-only array */ +	if (mddev->ro) {/* never try to sync a read-only array */ +		set_bit(MD_RECOVERY_INTR, &mddev->recovery);  		return; +	}  	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {  		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { @@ -7401,9 +7432,6 @@ void md_do_sync(struct md_thread *thread)  		mddev->curr_resync = 2;  	try_again: -		if (kthread_should_stop()) -			set_bit(MD_RECOVERY_INTR, &mddev->recovery); -  		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))  			goto skip;  		for_each_mddev(mddev2, tmp) { @@ -7428,7 +7456,7 @@ void md_do_sync(struct md_thread *thread)  				 * be caught by 'softlockup'  				 */  				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); -				if (!kthread_should_stop() && +				if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&  				    mddev2->curr_resync >= mddev->curr_resync) {  					printk(KERN_INFO "md: delaying %s of %s"  					       " until %s has finished (they" @@ -7473,6 +7501,19 @@ void md_do_sync(struct md_thread *thread)  			    rdev->recovery_offset < j)  				j = rdev->recovery_offset;  		rcu_read_unlock(); + +		/* If there is a bitmap, we need to make sure all +		 * writes that started before we added a spare +		 * complete before we start doing a recovery. +		 * Otherwise the write might complete and (via +		 * bitmap_endwrite) set a bit in the bitmap after the +		 * recovery has checked that bit and skipped that +		 * region. +		 */ +		if (mddev->bitmap) { +			mddev->pers->quiesce(mddev, 1); +			mddev->pers->quiesce(mddev, 0); +		}  	}  	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); @@ -7504,7 +7545,7 @@ void md_do_sync(struct md_thread *thread)  	last_check = 0;  	if (j>2) { -		printk(KERN_INFO  +		printk(KERN_INFO  		       "md: resuming %s of %s from checkpoint.\n",  		       desc, mdname(mddev));  		mddev->curr_resync = j; @@ -7541,7 +7582,8 @@ void md_do_sync(struct md_thread *thread)  			sysfs_notify(&mddev->kobj, NULL, "sync_completed");  		} -		while (j >= mddev->resync_max && !kthread_should_stop()) { +		while (j >= mddev->resync_max && +		       !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {  			/* As this condition is controlled by user-space,  			 * we can block indefinitely, so use '_interruptible'  			 * to avoid triggering warnings. 
@@ -7549,17 +7591,18 @@ void md_do_sync(struct md_thread *thread)  			flush_signals(current); /* just in case */  			wait_event_interruptible(mddev->recovery_wait,  						 mddev->resync_max > j -						 || kthread_should_stop()); +						 || test_bit(MD_RECOVERY_INTR, +							     &mddev->recovery));  		} -		if (kthread_should_stop()) -			goto interrupted; +		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) +			break;  		sectors = mddev->pers->sync_request(mddev, j, &skipped,  						  currspeed < speed_min(mddev));  		if (sectors == 0) {  			set_bit(MD_RECOVERY_INTR, &mddev->recovery); -			goto out; +			break;  		}  		if (!skipped) { /* actual IO requested */ @@ -7596,10 +7639,8 @@ void md_do_sync(struct md_thread *thread)  			last_mark = next;  		} - -		if (kthread_should_stop()) -			goto interrupted; - +		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) +			break;  		/*  		 * this loop exits only if either when we are slower than @@ -7622,11 +7663,12 @@ void md_do_sync(struct md_thread *thread)  			}  		}  	} -	printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc); +	printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc, +	       test_bit(MD_RECOVERY_INTR, &mddev->recovery) +	       ? "interrupted" : "done");  	/*  	 * this also signals 'finished resyncing' to md_stop  	 */ - out:  	blk_finish_plug(&plug);  	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); @@ -7680,16 +7722,6 @@ void md_do_sync(struct md_thread *thread)  	set_bit(MD_RECOVERY_DONE, &mddev->recovery);  	md_wakeup_thread(mddev->thread);  	return; - - interrupted: -	/* -	 * got a signal, exit. -	 */ -	printk(KERN_INFO -	       "md: md_do_sync() got signal ... exiting\n"); -	set_bit(MD_RECOVERY_INTR, &mddev->recovery); -	goto out; -  }  EXPORT_SYMBOL_GPL(md_do_sync); @@ -7730,10 +7762,12 @@ static int remove_and_add_spares(struct mddev *mddev,  		if (test_bit(Faulty, &rdev->flags))  			continue;  		if (mddev->ro && -		    rdev->saved_raid_disk < 0) +		    ! (rdev->saved_raid_disk >= 0 && +		       !test_bit(Bitmap_sync, &rdev->flags)))  			continue; -		rdev->recovery_offset = 0; +		if (rdev->saved_raid_disk < 0) +			rdev->recovery_offset = 0;  		if (mddev->pers->  		    hot_add_disk(mddev, rdev) == 0) {  			if (sysfs_link_rdev(mddev, rdev)) @@ -7791,7 +7825,7 @@ void md_check_recovery(struct mddev *mddev)  	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))  		return;  	if ( ! ( -		(mddev->flags & ~ (1<<MD_CHANGE_PENDING)) || +		(mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||  		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||  		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||  		(mddev->external == 0 && mddev->safemode == 1) || @@ -7811,9 +7845,13 @@ void md_check_recovery(struct mddev *mddev)  			 * As we only add devices that are already in-sync,  			 * we can activate the spares immediately.  			 
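The md_do_sync() changes above retire every kthread_should_stop() test in favour of checking MD_RECOVERY_INTR: cancellation is now requested by setting that flag and waking whatever waitqueue the thread is sleeping on, and the thread simply breaks out so its normal completion path runs (the separate 'interrupted:' exit disappears). A generic sketch of that cancellation style (all names illustrative):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/types.h>

struct worker {
	wait_queue_head_t wq;		/* init_waitqueue_head() at setup */
	unsigned long flags;
#define WORK_CANCEL	0
	u64 done, limit, total;		/* 'limit' paces the worker */
};

/* Anyone may cancel: raise the flag, then wake every sleep site. */
static void worker_cancel(struct worker *w)
{
	set_bit(WORK_CANCEL, &w->flags);
	wake_up(&w->wq);
}

static void worker_loop(struct worker *w)
{
	while (w->done < w->total) {
		/* pacing: sleep until allowed to proceed or cancelled;
		 * whoever raises 'limit' also calls wake_up(&w->wq) */
		wait_event_interruptible(w->wq,
				w->done < w->limit ||
				test_bit(WORK_CANCEL, &w->flags));
		if (test_bit(WORK_CANCEL, &w->flags))
			break;		/* fall into the shared cleanup */

		/* ... perform one chunk of work ... */
		w->done++;
	}
	/* single completion path for "finished" and "cancelled" alike */
}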
*/ -			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);  			remove_and_add_spares(mddev, NULL); -			mddev->pers->spare_active(mddev); +			/* There is no thread, but we need to call +			 * ->spare_active and clear saved_raid_disk +			 */ +			set_bit(MD_RECOVERY_INTR, &mddev->recovery); +			md_reap_sync_thread(mddev); +			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);  			goto unlock;  		} @@ -7934,6 +7972,7 @@ void md_reap_sync_thread(struct mddev *mddev)  	/* resync has finished, collect result */  	md_unregister_thread(&mddev->sync_thread); +	wake_up(&resync_wait);  	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&  	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {  		/* success...*/ @@ -7949,14 +7988,10 @@ void md_reap_sync_thread(struct mddev *mddev)  		mddev->pers->finish_reshape(mddev);  	/* If array is no-longer degraded, then any saved_raid_disk -	 * information must be scrapped.  Also if any device is now -	 * In_sync we must scrape the saved_raid_disk for that device -	 * do the superblock for an incrementally recovered device -	 * written out. +	 * information must be scrapped.  	 */ -	rdev_for_each(rdev, mddev) -		if (!mddev->degraded || -		    test_bit(In_sync, &rdev->flags)) +	if (!mddev->degraded) +		rdev_for_each(rdev, mddev)  			rdev->saved_raid_disk = -1;  	md_update_sb(mddev, 1); @@ -8111,6 +8146,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,  	u64 *p;  	int lo, hi;  	int rv = 1; +	unsigned long flags;  	if (bb->shift < 0)  		/* badblocks are disabled */ @@ -8125,7 +8161,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,  		sectors = next - s;  	} -	write_seqlock_irq(&bb->lock); +	write_seqlock_irqsave(&bb->lock, flags);  	p = bb->page;  	lo = 0; @@ -8241,7 +8277,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,  	bb->changed = 1;  	if (!acknowledged)  		bb->unacked_exist = 1; -	write_sequnlock_irq(&bb->lock); +	write_sequnlock_irqrestore(&bb->lock, flags);  	return rv;  } @@ -8320,7 +8356,7 @@ static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)  			if (a < s) {  				/* we need to split this range */  				if (bb->count >= MD_MAX_BADBLOCKS) { -					rv = 0; +					rv = -ENOSPC;  					goto out;  				}  				memmove(p+lo+1, p+lo, (bb->count - lo) * 8); @@ -8506,7 +8542,8 @@ static int md_notify_reboot(struct notifier_block *this,  		if (mddev_trylock(mddev)) {  			if (mddev->pers)  				__md_stop_writes(mddev); -			mddev->safemode = 2; +			if (mddev->persistent) +				mddev->safemode = 2;  			mddev_unlock(mddev);  		}  		need_delay = 1; @@ -8648,6 +8685,7 @@ static __exit void md_exit(void)  {  	struct mddev *mddev;  	struct list_head *tmp; +	int delay = 1;  	blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);  	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS); @@ -8656,7 +8694,19 @@ static __exit void md_exit(void)  	unregister_blkdev(mdp_major, "mdp");  	unregister_reboot_notifier(&md_notifier);  	unregister_sysctl_table(raid_table_header); + +	/* We cannot unload the modules while some process is +	 * waiting for us in select() or poll() - wake them up +	 */ +	md_unloading = 1; +	while (waitqueue_active(&md_event_waiters)) { +		/* not safe to leave yet */ +		wake_up(&md_event_waiters); +		msleep(delay); +		delay += delay; +	}  	remove_proc_entry("mdstat", NULL); +  	for_each_mddev(mddev, tmp) {  		export_array(mddev);  		mddev->hold_active = 0; diff --git a/drivers/md/md.h b/drivers/md/md.h index 608050c43f1..a49d991f3fe 100644 
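md_set_badblocks() above switches from write_seqlock_irq() to write_seqlock_irqsave(): bad blocks may now be recorded from callers that already run with interrupts disabled, so the unlock must restore rather than unconditionally re-enable them. The usual seqlock writer/reader pairing, for reference (the table structure is illustrative):

#include <linux/seqlock.h>

struct bb_table {
	seqlock_t lock;		/* seqlock_init() at setup */
	int count;
	u64 entries[64];
};

static void bb_add(struct bb_table *t, u64 e)
{
	unsigned long flags;

	/* safe whether or not the caller already disabled interrupts */
	write_seqlock_irqsave(&t->lock, flags);
	if (t->count < 64)
		t->entries[t->count++] = e;
	write_sequnlock_irqrestore(&t->lock, flags);
}

static int bb_count(struct bb_table *t)
{
	unsigned int seq;
	int n;

	do {			/* lockless read, retried on writer overlap */
		seq = read_seqbegin(&t->lock);
		n = t->count;
	} while (read_seqretry(&t->lock, seq));

	return n;
}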
--- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -106,7 +106,7 @@ struct md_rdev {  					   */  	struct work_struct del_work;	/* used for delayed sysfs removal */ -	struct sysfs_dirent *sysfs_state; /* handle for 'state' +	struct kernfs_node *sysfs_state; /* handle for 'state'  					   * sysfs entry */  	struct badblocks { @@ -129,6 +129,9 @@ struct md_rdev {  enum flag_bits {  	Faulty,			/* device is known to have a fault */  	In_sync,		/* device is in_sync with rest of array */ +	Bitmap_sync,		/* ..actually, not quite In_sync.  Need a +				 * bitmap-based recovery to get fully in sync +				 */  	Unmerged,		/* device is being added to array and should  				 * be considerred for bvec_merge_fn but not  				 * yet for actual IO @@ -376,10 +379,10 @@ struct mddev {  	sector_t			resync_max;	/* resync should pause  							 * when it gets here */ -	struct sysfs_dirent		*sysfs_state;	/* handle for 'array_state' +	struct kernfs_node		*sysfs_state;	/* handle for 'array_state'  							 * file in sysfs.  							 */ -	struct sysfs_dirent		*sysfs_action;  /* handle for 'sync_action' */ +	struct kernfs_node		*sysfs_action;  /* handle for 'sync_action' */  	struct work_struct del_work;	/* used for delayed sysfs removal */ @@ -498,13 +501,13 @@ struct md_sysfs_entry {  };  extern struct attribute_group md_bitmap_group; -static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name) +static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)  {  	if (sd) -		return sysfs_get_dirent(sd, NULL, name); +		return sysfs_get_dirent(sd, name);  	return sd;  } -static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd) +static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)  {  	if (sd)  		sysfs_notify_dirent(sd); @@ -602,7 +605,6 @@ extern int md_check_no_bitmap(struct mddev *mddev);  extern int md_integrity_register(struct mddev *mddev);  extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);  extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); -extern void restore_bitmap_write_access(struct file *file);  extern void mddev_init(struct mddev *mddev);  extern int md_run(struct mddev *mddev); @@ -617,7 +619,6 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,  				   struct mddev *mddev);  extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,  				   struct mddev *mddev); -extern void md_trim_bio(struct bio *bio, int offset, int size);  extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);  static inline int mddev_check_plugged(struct mddev *mddev) diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 1642eae75a3..849ad39f547 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -100,7 +100,7 @@ static void multipath_end_request(struct bio *bio, int error)  		md_error (mp_bh->mddev, rdev);  		printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",   		       bdevname(rdev->bdev,b),  -		       (unsigned long long)bio->bi_sector); +		       (unsigned long long)bio->bi_iter.bi_sector);  		multipath_reschedule_retry(mp_bh);  	} else  		multipath_end_bh_io(mp_bh, error); @@ -132,7 +132,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)  	multipath = conf->multipaths + mp_bh->path;  	mp_bh->bio = *bio; -	mp_bh->bio.bi_sector += multipath->rdev->data_offset; +	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;  	mp_bh->bio.bi_bdev = multipath->rdev->bdev;  	mp_bh->bio.bi_rw 
|= REQ_FAILFAST_TRANSPORT;  	mp_bh->bio.bi_end_io = multipath_end_request; @@ -355,21 +355,22 @@ static void multipathd(struct md_thread *thread)  		spin_unlock_irqrestore(&conf->device_lock, flags);  		bio = &mp_bh->bio; -		bio->bi_sector = mp_bh->master_bio->bi_sector; +		bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;  		if ((mp_bh->path = multipath_map (conf))<0) {  			printk(KERN_ALERT "multipath: %s: unrecoverable IO read"  				" error for block %llu\n",  				bdevname(bio->bi_bdev,b), -				(unsigned long long)bio->bi_sector); +				(unsigned long long)bio->bi_iter.bi_sector);  			multipath_end_bh_io(mp_bh, -EIO);  		} else {  			printk(KERN_ERR "multipath: %s: redirecting sector %llu"  				" to another IO path\n",  				bdevname(bio->bi_bdev,b), -				(unsigned long long)bio->bi_sector); +				(unsigned long long)bio->bi_iter.bi_sector);  			*bio = *(mp_bh->master_bio); -			bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset; +			bio->bi_iter.bi_sector += +				conf->multipaths[mp_bh->path].rdev->data_offset;  			bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;  			bio->bi_rw |= REQ_FAILFAST_TRANSPORT;  			bio->bi_end_io = multipath_end_request; diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig index 19b26879541..0c2dec7aec2 100644 --- a/drivers/md/persistent-data/Kconfig +++ b/drivers/md/persistent-data/Kconfig @@ -6,3 +6,13 @@ config DM_PERSISTENT_DATA         ---help---  	 Library providing immutable on-disk data structure support for  	 device-mapper targets such as the thin provisioning target. + +config DM_DEBUG_BLOCK_STACK_TRACING +       boolean "Keep stack trace of persistent data block lock holders" +       depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA +       select STACKTRACE +       ---help--- +	 Enable this for messages that may help debug problems with the +	 block manager locking used by thin provisioning and caching. + +	 If unsure, say N. diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c index 172147eb1d4..1d75b1dc1e2 100644 --- a/drivers/md/persistent-data/dm-array.c +++ b/drivers/md/persistent-data/dm-array.c @@ -317,8 +317,16 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,  	 * The shadow op will often be a noop.  Only insert if it really  	 * copied data.  	 */ -	if (dm_block_location(*block) != b) +	if (dm_block_location(*block) != b) { +		/* +		 * dm_tm_shadow_block will have already decremented the old +		 * block, but it is still referenced by the btree.  We +		 * increment to stop the insert decrementing it below zero +		 * when overwriting the old value. 
+		 */ +		dm_tm_inc(info->btree_info.tm, b);  		r = insert_ablock(info, index, *block, root); +	}  	return r;  } @@ -509,15 +517,18 @@ static int grow_add_tail_block(struct resize *resize)  static int grow_needs_more_blocks(struct resize *resize)  {  	int r; +	unsigned old_nr_blocks = resize->old_nr_full_blocks;  	if (resize->old_nr_entries_in_last_block > 0) { +		old_nr_blocks++; +  		r = grow_extend_tail_block(resize, resize->max_entries);  		if (r)  			return r;  	}  	r = insert_full_ablocks(resize->info, resize->size_of_block, -				resize->old_nr_full_blocks, +				old_nr_blocks,  				resize->new_nr_full_blocks,  				resize->max_entries, resize->value,  				&resize->root); diff --git a/drivers/md/persistent-data/dm-bitset.c b/drivers/md/persistent-data/dm-bitset.c index cd9a86d4cdf..36f7cc2c710 100644 --- a/drivers/md/persistent-data/dm-bitset.c +++ b/drivers/md/persistent-data/dm-bitset.c @@ -65,7 +65,7 @@ int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root,  	int r;  	__le64 value; -	if (!info->current_index_set) +	if (!info->current_index_set || !info->dirty)  		return 0;  	value = cpu_to_le64(info->current_bits); @@ -77,6 +77,8 @@ int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root,  		return r;  	info->current_index_set = false; +	info->dirty = false; +  	return 0;  }  EXPORT_SYMBOL_GPL(dm_bitset_flush); @@ -94,6 +96,8 @@ static int read_bits(struct dm_disk_bitset *info, dm_block_t root,  	info->current_bits = le64_to_cpu(value);  	info->current_index_set = true;  	info->current_index = array_index; +	info->dirty = false; +  	return 0;  } @@ -126,6 +130,8 @@ int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root,  		return r;  	set_bit(b, (unsigned long *) &info->current_bits); +	info->dirty = true; +  	return 0;  }  EXPORT_SYMBOL_GPL(dm_bitset_set_bit); @@ -141,6 +147,8 @@ int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root,  		return r;  	clear_bit(b, (unsigned long *) &info->current_bits); +	info->dirty = true; +  	return 0;  }  EXPORT_SYMBOL_GPL(dm_bitset_clear_bit); diff --git a/drivers/md/persistent-data/dm-bitset.h b/drivers/md/persistent-data/dm-bitset.h index e1b9bea14aa..c2287d672ef 100644 --- a/drivers/md/persistent-data/dm-bitset.h +++ b/drivers/md/persistent-data/dm-bitset.h @@ -71,6 +71,7 @@ struct dm_disk_bitset {  	uint64_t current_bits;  	bool current_index_set:1; +	bool dirty:1;  };  /* diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index a7e8bf29638..087411c95ff 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -104,7 +104,7 @@ static int __check_holder(struct block_lock *lock)  	for (i = 0; i < MAX_HOLDERS; i++) {  		if (lock->holders[i] == current) { -			DMERR("recursive lock detected in pool metadata"); +			DMERR("recursive lock detected in metadata");  #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING  			DMERR("previously held here:");  			print_stack_trace(lock->traces + i, 4); @@ -595,25 +595,14 @@ int dm_bm_unlock(struct dm_block *b)  }  EXPORT_SYMBOL_GPL(dm_bm_unlock); -int dm_bm_flush_and_unlock(struct dm_block_manager *bm, -			   struct dm_block *superblock) +int dm_bm_flush(struct dm_block_manager *bm)  { -	int r; -  	if (bm->read_only)  		return -EPERM; -	r = dm_bufio_write_dirty_buffers(bm->bufio); -	if (unlikely(r)) { -		dm_bm_unlock(superblock); -		return r; -	} - -	dm_bm_unlock(superblock); -  	return dm_bufio_write_dirty_buffers(bm->bufio);  } 
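The dirty flag added to struct dm_disk_bitset above lets dm_bitset_flush() become a no-op when the cached word has only been read, instead of rewriting an unchanged block on every flush. The shape of that write-back cache, reduced to its essentials (standalone and illustrative; the real code stores the word in a dm-array):

#include <linux/types.h>

struct cached_word {
	u64 bits;		/* in-core copy of the cached 64-bit word */
	unsigned int index;	/* which word is cached */
	bool index_set;
	bool dirty;		/* in-core copy differs from what's on disk */
};

static void cached_word_set_bit(struct cached_word *w, unsigned int bit)
{
	w->bits |= 1ULL << bit;
	w->dirty = true;	/* a future flush now has real work to do */
}

static int cached_word_flush(struct cached_word *w,
			     int (*write_word)(unsigned int index, u64 bits))
{
	int r;

	if (!w->index_set || !w->dirty)
		return 0;	/* nothing has changed: skip the write */

	r = write_word(w->index, w->bits);
	if (r)
		return r;

	w->index_set = false;
	w->dirty = false;
	return 0;
}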
-EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock); +EXPORT_SYMBOL_GPL(dm_bm_flush);  void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)  { @@ -626,6 +615,12 @@ void dm_bm_set_read_only(struct dm_block_manager *bm)  }  EXPORT_SYMBOL_GPL(dm_bm_set_read_only); +void dm_bm_set_read_write(struct dm_block_manager *bm) +{ +	bm->read_only = false; +} +EXPORT_SYMBOL_GPL(dm_bm_set_read_write); +  u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)  {  	return crc32c(~(u32) 0, data, len) ^ init_xor; diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h index 9a82083a66b..1b95dfc1778 100644 --- a/drivers/md/persistent-data/dm-block-manager.h +++ b/drivers/md/persistent-data/dm-block-manager.h @@ -105,12 +105,11 @@ int dm_bm_unlock(struct dm_block *b);   *   * This method always blocks.   */ -int dm_bm_flush_and_unlock(struct dm_block_manager *bm, -			   struct dm_block *superblock); +int dm_bm_flush(struct dm_block_manager *bm); - /* -  * Request data be prefetched into the cache. -  */ +/* + * Request data is prefetched into the cache. + */  void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);  /* @@ -125,6 +124,7 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);   * be returned if you do.   */  void dm_bm_set_read_only(struct dm_block_manager *bm); +void dm_bm_set_read_write(struct dm_block_manager *bm);  u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor); diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index 468e371ee9b..416060c2570 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -770,8 +770,8 @@ EXPORT_SYMBOL_GPL(dm_btree_insert_notify);  /*----------------------------------------------------------------*/ -static int find_highest_key(struct ro_spine *s, dm_block_t block, -			    uint64_t *result_key, dm_block_t *next_block) +static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest, +		    uint64_t *result_key, dm_block_t *next_block)  {  	int i, r;  	uint32_t flags; @@ -788,7 +788,11 @@ static int find_highest_key(struct ro_spine *s, dm_block_t block,  		else  			i--; -		*result_key = le64_to_cpu(ro_node(s)->keys[i]); +		if (find_highest) +			*result_key = le64_to_cpu(ro_node(s)->keys[i]); +		else +			*result_key = le64_to_cpu(ro_node(s)->keys[0]); +  		if (next_block || flags & INTERNAL_NODE)  			block = value64(ro_node(s), i); @@ -799,16 +803,16 @@ static int find_highest_key(struct ro_spine *s, dm_block_t block,  	return 0;  } -int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root, -			      uint64_t *result_keys) +static int dm_btree_find_key(struct dm_btree_info *info, dm_block_t root, +			     bool find_highest, uint64_t *result_keys)  {  	int r = 0, count = 0, level;  	struct ro_spine spine;  	init_ro_spine(&spine, info);  	for (level = 0; level < info->levels; level++) { -		r = find_highest_key(&spine, root, result_keys + level, -				     level == info->levels - 1 ? NULL : &root); +		r = find_key(&spine, root, find_highest, result_keys + level, +			     level == info->levels - 1 ? NULL : &root);  		if (r == -ENODATA) {  			r = 0;  			break; @@ -822,8 +826,23 @@ int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,  	return r ? 
r : count;  } + +int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root, +			      uint64_t *result_keys) +{ +	return dm_btree_find_key(info, root, true, result_keys); +}  EXPORT_SYMBOL_GPL(dm_btree_find_highest_key); +int dm_btree_find_lowest_key(struct dm_btree_info *info, dm_block_t root, +			     uint64_t *result_keys) +{ +	return dm_btree_find_key(info, root, false, result_keys); +} +EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key); + +/*----------------------------------------------------------------*/ +  /*   * FIXME: We shouldn't use a recursive algorithm when we have limited stack   * space.  Also this only works for single level trees. diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h index 8672d159e0b..dacfc34180b 100644 --- a/drivers/md/persistent-data/dm-btree.h +++ b/drivers/md/persistent-data/dm-btree.h @@ -137,6 +137,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,  /*   * Returns < 0 on failure.  Otherwise the number of key entries that have   * been filled out.  Remember trees can have zero entries, and as such have + * no lowest key. + */ +int dm_btree_find_lowest_key(struct dm_btree_info *info, dm_block_t root, +			     uint64_t *result_keys); + +/* + * Returns < 0 on failure.  Otherwise the number of key entries that have + * been filled out.  Remember trees can have zero entries, and as such have   * no highest key.   */  int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root, diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c index 6058569fe86..aacbe70c2c2 100644 --- a/drivers/md/persistent-data/dm-space-map-common.c +++ b/drivers/md/persistent-data/dm-space-map-common.c @@ -245,6 +245,10 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)  		return -EINVAL;  	} +	/* +	 * We need to set this before the dm_tm_new_block() call below. 
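dm_btree_find_lowest_key() above is the mirror image of the existing dm_btree_find_highest_key(); both now funnel through find_key() with a bool selecting which end of the key space to walk. Typical usage is to discover which key range a metadata tree actually populates, for example (illustrative helper, single-level tree assumed):

#include <linux/errno.h>
#include "dm-btree.h"

static int mapped_key_range(struct dm_btree_info *info, dm_block_t root,
			    uint64_t *lo, uint64_t *hi)
{
	int r;

	r = dm_btree_find_lowest_key(info, root, lo);
	if (r < 0)
		return r;
	if (!r)
		return -ENODATA;	/* tree is empty: no lowest key */

	r = dm_btree_find_highest_key(info, root, hi);
	return r < 0 ? r : 0;
}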
+	 */ +	ll->nr_blocks = nr_blocks;  	for (i = old_blocks; i < blocks; i++) {  		struct dm_block *b;  		struct disk_index_entry idx; @@ -252,6 +256,7 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)  		r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);  		if (r < 0)  			return r; +  		idx.blocknr = cpu_to_le64(dm_block_location(b));  		r = dm_tm_unlock(ll->tm, b); @@ -266,7 +271,6 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)  			return r;  	} -	ll->nr_blocks = nr_blocks;  	return 0;  } @@ -381,7 +385,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,  }  static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, -			uint32_t (*mutator)(void *context, uint32_t old), +			int (*mutator)(void *context, uint32_t old, uint32_t *new),  			void *context, enum allocation_event *ev)  {  	int r; @@ -410,11 +414,17 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,  	if (old > 2) {  		r = sm_ll_lookup_big_ref_count(ll, b, &old); -		if (r < 0) +		if (r < 0) { +			dm_tm_unlock(ll->tm, nb);  			return r; +		}  	} -	ref_count = mutator(context, old); +	r = mutator(context, old, &ref_count); +	if (r) { +		dm_tm_unlock(ll->tm, nb); +		return r; +	}  	if (ref_count <= 2) {  		sm_set_bitmap(bm_le, bit, ref_count); @@ -465,9 +475,10 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,  	return ll->save_ie(ll, index, &ie_disk);  } -static uint32_t set_ref_count(void *context, uint32_t old) +static int set_ref_count(void *context, uint32_t old, uint32_t *new)  { -	return *((uint32_t *) context); +	*new = *((uint32_t *) context); +	return 0;  }  int sm_ll_insert(struct ll_disk *ll, dm_block_t b, @@ -476,9 +487,10 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,  	return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);  } -static uint32_t inc_ref_count(void *context, uint32_t old) +static int inc_ref_count(void *context, uint32_t old, uint32_t *new)  { -	return old + 1; +	*new = old + 1; +	return 0;  }  int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) @@ -486,9 +498,15 @@ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)  	return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);  } -static uint32_t dec_ref_count(void *context, uint32_t old) +static int dec_ref_count(void *context, uint32_t old, uint32_t *new)  { -	return old - 1; +	if (!old) { +		DMERR_LIMIT("unable to decrement a reference count below 0"); +		return -EINVAL; +	} + +	*new = old - 1; +	return 0;  }  int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c index e735a6d5a79..cfbf9617e46 100644 --- a/drivers/md/persistent-data/dm-space-map-disk.c +++ b/drivers/md/persistent-data/dm-space-map-disk.c @@ -140,26 +140,10 @@ static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)  static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)  { -	int r; -	uint32_t old_count;  	enum allocation_event ev;  	struct sm_disk *smd = container_of(sm, struct sm_disk, sm); -	r = sm_ll_dec(&smd->ll, b, &ev); -	if (!r && (ev == SM_FREE)) { -		/* -		 * It's only free if it's also free in the last -		 * transaction. 
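sm_ll_mutate() above changes its mutator callback from "return the new count" to "return an error code and fill *new", so dec_ref_count() can refuse to wrap a reference count below zero instead of silently storing a bogus value. The pattern in isolation (names illustrative):

#include <linux/types.h>
#include <linux/errno.h>

typedef int (*count_mutator)(void *context, uint32_t old, uint32_t *new);

static int dec_count(void *context, uint32_t old, uint32_t *new)
{
	if (!old)
		return -EINVAL;	/* refuse to underflow */
	*new = old - 1;
	return 0;
}

static int apply_mutation(uint32_t *count, count_mutator fn, void *context)
{
	uint32_t result;
	int r = fn(context, *count, &result);

	if (r)
		return r;	/* propagate instead of storing garbage */
	*count = result;
	return 0;
}

Note that the error paths in sm_ll_mutate() also gain the dm_tm_unlock(ll->tm, nb) calls that the old early returns leaked.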
-		 */ -		r = sm_ll_lookup(&smd->old_ll, b, &old_count); -		if (r) -			return r; - -		if (!old_count) -			smd->nr_allocated_this_transaction--; -	} - -	return r; +	return sm_ll_dec(&smd->ll, b, &ev);  }  static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b) diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index 1c959684cae..786b689bdfc 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -91,6 +91,69 @@ struct block_op {  	dm_block_t block;  }; +struct bop_ring_buffer { +	unsigned begin; +	unsigned end; +	struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1]; +}; + +static void brb_init(struct bop_ring_buffer *brb) +{ +	brb->begin = 0; +	brb->end = 0; +} + +static bool brb_empty(struct bop_ring_buffer *brb) +{ +	return brb->begin == brb->end; +} + +static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old) +{ +	unsigned r = old + 1; +	return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r; +} + +static int brb_push(struct bop_ring_buffer *brb, +		    enum block_op_type type, dm_block_t b) +{ +	struct block_op *bop; +	unsigned next = brb_next(brb, brb->end); + +	/* +	 * We don't allow the last bop to be filled, this way we can +	 * differentiate between full and empty. +	 */ +	if (next == brb->begin) +		return -ENOMEM; + +	bop = brb->bops + brb->end; +	bop->type = type; +	bop->block = b; + +	brb->end = next; + +	return 0; +} + +static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result) +{ +	struct block_op *bop; + +	if (brb_empty(brb)) +		return -ENODATA; + +	bop = brb->bops + brb->begin; +	result->type = bop->type; +	result->block = bop->block; + +	brb->begin = brb_next(brb, brb->begin); + +	return 0; +} + +/*----------------------------------------------------------------*/ +  struct sm_metadata {  	struct dm_space_map sm; @@ -101,25 +164,20 @@ struct sm_metadata {  	unsigned recursion_count;  	unsigned allocated_this_transaction; -	unsigned nr_uncommitted; -	struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS]; +	struct bop_ring_buffer uncommitted;  	struct threshold threshold;  };  static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)  { -	struct block_op *op; +	int r = brb_push(&smm->uncommitted, type, b); -	if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) { +	if (r) {  		DMERR("too many recursive allocations");  		return -ENOMEM;  	} -	op = smm->uncommitted + smm->nr_uncommitted++; -	op->type = type; -	op->block = b; -  	return 0;  } @@ -158,11 +216,17 @@ static int out(struct sm_metadata *smm)  		return -ENOMEM;  	} -	if (smm->recursion_count == 1 && smm->nr_uncommitted) { -		while (smm->nr_uncommitted && !r) { -			smm->nr_uncommitted--; -			r = commit_bop(smm, smm->uncommitted + -				       smm->nr_uncommitted); +	if (smm->recursion_count == 1) { +		while (!brb_empty(&smm->uncommitted)) { +			struct block_op bop; + +			r = brb_pop(&smm->uncommitted, &bop); +			if (r) { +				DMERR("bug in bop ring buffer"); +				break; +			} + +			r = commit_bop(smm, &bop);  			if (r)  				break;  		} @@ -217,7 +281,8 @@ static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)  static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,  				 uint32_t *result)  { -	int r, i; +	int r; +	unsigned i;  	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);  	unsigned adjustment = 0; @@ -225,8 +290,10 @@ static int sm_metadata_get_count(struct 
dm_space_map *sm, dm_block_t b,  	 * We may have some uncommitted adjustments to add.  This list  	 * should always be really short.  	 */ -	for (i = 0; i < smm->nr_uncommitted; i++) { -		struct block_op *op = smm->uncommitted + i; +	for (i = smm->uncommitted.begin; +	     i != smm->uncommitted.end; +	     i = brb_next(&smm->uncommitted, i)) { +		struct block_op *op = smm->uncommitted.bops + i;  		if (op->block != b)  			continue; @@ -254,7 +321,8 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,  static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,  					      dm_block_t b, int *result)  { -	int r, i, adjustment = 0; +	int r, adjustment = 0; +	unsigned i;  	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);  	uint32_t rc; @@ -262,8 +330,11 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,  	 * We may have some uncommitted adjustments to add.  This list  	 * should always be really short.  	 */ -	for (i = 0; i < smm->nr_uncommitted; i++) { -		struct block_op *op = smm->uncommitted + i; +	for (i = smm->uncommitted.begin; +	     i != smm->uncommitted.end; +	     i = brb_next(&smm->uncommitted, i)) { + +		struct block_op *op = smm->uncommitted.bops + i;  		if (op->block != b)  			continue; @@ -384,12 +455,16 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)  	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);  	int r = sm_metadata_new_block_(sm, b); -	if (r) -		DMERR("unable to allocate new metadata block"); +	if (r) { +		DMERR_LIMIT("unable to allocate new metadata block"); +		return r; +	}  	r = sm_metadata_get_nr_free(sm, &count); -	if (r) -		DMERR("couldn't get free block count"); +	if (r) { +		DMERR_LIMIT("couldn't get free block count"); +		return r; +	}  	check_threshold(&smm->threshold, count); @@ -604,20 +679,38 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)  	 * Flick into a mode where all blocks get allocated in the new area.  	 */  	smm->begin = old_len; -	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); +	memcpy(sm, &bootstrap_ops, sizeof(*sm));  	/*  	 * Extend.  	 */  	r = sm_ll_extend(&smm->ll, extra_blocks); +	if (r) +		goto out;  	/* -	 * Switch back to normal behaviour. +	 * We repeatedly increment then commit until the commit doesn't +	 * allocate any new blocks.  	 */ -	memcpy(&smm->sm, &ops, sizeof(smm->sm)); -	for (i = old_len; !r && i < smm->begin; i++) -		r = sm_ll_inc(&smm->ll, i, &ev); +	do { +		for (i = old_len; !r && i < smm->begin; i++) { +			r = sm_ll_inc(&smm->ll, i, &ev); +			if (r) +				goto out; +		} +		old_len = smm->begin; + +		r = sm_ll_commit(&smm->ll); +		if (r) +			goto out; + +	} while (old_len != smm->begin); +out: +	/* +	 * Switch back to normal behaviour. 
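The fixed array of uncommitted block ops becomes bop_ring_buffer, which keeps one slot permanently unused so begin == end can mean "empty" while next(end) == begin means "full". A self-contained user-space sketch of that convention (the capacity and payload type here are arbitrary):

/* User-space sketch, not kernel code: a bounded ring buffer that leaves
 * one slot empty to distinguish full from empty, mirroring brb_push()/
 * brb_pop(). */
#include <errno.h>
#include <stdio.h>

#define RING_SLOTS 8	/* assumption; the patch sizes it from MAX_RECURSIVE_ALLOCATIONS + 1 */

struct ring {
	unsigned begin, end;
	int items[RING_SLOTS];
};

static unsigned ring_next(unsigned i)
{
	return (i + 1 >= RING_SLOTS) ? 0 : i + 1;
}

static int ring_push(struct ring *r, int item)
{
	unsigned next = ring_next(r->end);

	if (next == r->begin)
		return -ENOMEM;		/* full: the last slot is never used */
	r->items[r->end] = item;
	r->end = next;
	return 0;
}

static int ring_pop(struct ring *r, int *item)
{
	if (r->begin == r->end)
		return -ENODATA;	/* empty */
	*item = r->items[r->begin];
	r->begin = ring_next(r->begin);
	return 0;
}

int main(void)
{
	struct ring r = { 0, 0, { 0 } };
	int v;

	for (v = 1; v <= RING_SLOTS; v++)	/* the final push must fail */
		printf("push %d -> %d\n", v, ring_push(&r, v));
	while (!ring_pop(&r, &v))
		printf("pop %d\n", v);
	return 0;
}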
+	 */ +	memcpy(sm, &ops, sizeof(*sm));  	return r;  } @@ -649,7 +742,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,  	smm->begin = superblock + 1;  	smm->recursion_count = 0;  	smm->allocated_this_transaction = 0; -	smm->nr_uncommitted = 0; +	brb_init(&smm->uncommitted);  	threshold_init(&smm->threshold);  	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); @@ -658,6 +751,8 @@ int dm_sm_metadata_create(struct dm_space_map *sm,  	if (r)  		return r; +	if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS) +		nr_blocks = DM_SM_METADATA_MAX_BLOCKS;  	r = sm_ll_extend(&smm->ll, nr_blocks);  	if (r)  		return r; @@ -691,7 +786,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm,  	smm->begin = 0;  	smm->recursion_count = 0;  	smm->allocated_this_transaction = 0; -	smm->nr_uncommitted = 0; +	brb_init(&smm->uncommitted);  	threshold_init(&smm->threshold);  	memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll)); diff --git a/drivers/md/persistent-data/dm-space-map-metadata.h b/drivers/md/persistent-data/dm-space-map-metadata.h index 39bba0801cf..64df923974d 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.h +++ b/drivers/md/persistent-data/dm-space-map-metadata.h @@ -9,6 +9,17 @@  #include "dm-transaction-manager.h" +#define DM_SM_METADATA_BLOCK_SIZE (4096 >> SECTOR_SHIFT) + +/* + * The metadata device is currently limited in size. + * + * We have one block of index, which can hold 255 index entries.  Each + * index entry contains allocation info about ~16k metadata blocks. + */ +#define DM_SM_METADATA_MAX_BLOCKS (255 * ((1 << 14) - 64)) +#define DM_SM_METADATA_MAX_SECTORS (DM_SM_METADATA_MAX_BLOCKS * DM_SM_METADATA_BLOCK_SIZE) +  /*   * Unfortunately we have to use two-phase construction due to the cycle   * between the tm and sm. diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c index 81da1a26042..3bc30a0ae3d 100644 --- a/drivers/md/persistent-data/dm-transaction-manager.c +++ b/drivers/md/persistent-data/dm-transaction-manager.c @@ -154,7 +154,7 @@ int dm_tm_pre_commit(struct dm_transaction_manager *tm)  	if (r < 0)  		return r; -	return 0; +	return dm_bm_flush(tm->bm);  }  EXPORT_SYMBOL_GPL(dm_tm_pre_commit); @@ -164,8 +164,9 @@ int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)  		return -EWOULDBLOCK;  	wipe_shadow_table(tm); +	dm_bm_unlock(root); -	return dm_bm_flush_and_unlock(tm->bm, root); +	return dm_bm_flush(tm->bm);  }  EXPORT_SYMBOL_GPL(dm_tm_commit); diff --git a/drivers/md/persistent-data/dm-transaction-manager.h b/drivers/md/persistent-data/dm-transaction-manager.h index b5b139076ca..2772ed2a781 100644 --- a/drivers/md/persistent-data/dm-transaction-manager.h +++ b/drivers/md/persistent-data/dm-transaction-manager.h @@ -38,18 +38,17 @@ struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transac  /*   * We use a 2-phase commit here.   * - * i) In the first phase the block manager is told to start flushing, and - * the changes to the space map are written to disk.  You should interrogate - * your particular space map to get detail of its root node etc. to be - * included in your superblock. + * i) Make all changes for the transaction *except* for the superblock. + * Then call dm_tm_pre_commit() to flush them to disk.   * - * ii) @root will be committed last.  You shouldn't use more than the - * first 512 bytes of @root if you wish the transaction to survive a power - * failure.  You *must* have a write lock held on @root for both stage (i) - * and (ii).  
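With one index block of 255 entries, each covering roughly 16K-minus-overhead metadata blocks of 4KiB, the limit that dm_sm_metadata_create() now clamps nr_blocks to works out to about 4.16 million blocks, a little under 16GiB of metadata. The arithmetic, assuming a SECTOR_SHIFT of 9:

/* User-space sketch: evaluates the metadata space map size limit the
 * new DM_SM_METADATA_MAX_* macros encode. */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9
#define DM_SM_METADATA_BLOCK_SIZE (4096 >> SECTOR_SHIFT)	/* 8 sectors per block */
#define DM_SM_METADATA_MAX_BLOCKS (255 * ((1 << 14) - 64))	/* index entries * blocks per entry */
#define DM_SM_METADATA_MAX_SECTORS (DM_SM_METADATA_MAX_BLOCKS * DM_SM_METADATA_BLOCK_SIZE)

int main(void)
{
	uint64_t bytes = (uint64_t)DM_SM_METADATA_MAX_SECTORS << SECTOR_SHIFT;

	printf("max metadata blocks:  %d\n", DM_SM_METADATA_MAX_BLOCKS);
	printf("max metadata sectors: %d\n", DM_SM_METADATA_MAX_SECTORS);
	printf("max metadata size:    %llu bytes (~%llu GiB)\n",
	       (unsigned long long)bytes,
	       (unsigned long long)(bytes >> 30));
	return 0;
}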
The commit will drop the write lock. + * ii) Lock your superblock.  Update.  Then call dm_tm_commit() which will + * unlock the superblock and flush it.  No other blocks should be updated + * during this period.  Care should be taken to never unlock a partially + * updated superblock; perform any operations that could fail *before* you + * take the superblock lock.   */  int dm_tm_pre_commit(struct dm_transaction_manager *tm); -int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root); +int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *superblock);  /*   * These methods are the only way to get hold of a writeable block. diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index c4d420b7d2f..407a99e46f6 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -501,10 +501,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,  			unsigned int chunk_sects, struct bio *bio)  {  	if (likely(is_power_of_2(chunk_sects))) { -		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1)) +		return chunk_sects >= +			((bio->bi_iter.bi_sector & (chunk_sects-1))  					+ bio_sectors(bio));  	} else{ -		sector_t sector = bio->bi_sector; +		sector_t sector = bio->bi_iter.bi_sector;  		return chunk_sects >= (sector_div(sector, chunk_sects)  						+ bio_sectors(bio));  	} @@ -512,64 +513,44 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,  static void raid0_make_request(struct mddev *mddev, struct bio *bio)  { -	unsigned int chunk_sects; -	sector_t sector_offset;  	struct strip_zone *zone;  	struct md_rdev *tmp_dev; +	struct bio *split;  	if (unlikely(bio->bi_rw & REQ_FLUSH)) {  		md_flush_request(mddev, bio);  		return;  	} -	chunk_sects = mddev->chunk_sectors; -	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) { -		sector_t sector = bio->bi_sector; -		struct bio_pair *bp; -		/* Sanity check -- queue functions should prevent this happening */ -		if (bio_segments(bio) > 1) -			goto bad_map; -		/* This is a one page bio that upper layers -		 * refuse to split for us, so we need to split it. -		 */ -		if (likely(is_power_of_2(chunk_sects))) -			bp = bio_split(bio, chunk_sects - (sector & -							   (chunk_sects-1))); -		else -			bp = bio_split(bio, chunk_sects - -				       sector_div(sector, chunk_sects)); -		raid0_make_request(mddev, &bp->bio1); -		raid0_make_request(mddev, &bp->bio2); -		bio_pair_release(bp); -		return; -	} +	do { +		sector_t sector = bio->bi_iter.bi_sector; +		unsigned chunk_sects = mddev->chunk_sectors; -	sector_offset = bio->bi_sector; -	zone = find_zone(mddev->private, §or_offset); -	tmp_dev = map_sector(mddev, zone, bio->bi_sector, -			     §or_offset); -	bio->bi_bdev = tmp_dev->bdev; -	bio->bi_sector = sector_offset + zone->dev_start + -		tmp_dev->data_offset; - -	if (unlikely((bio->bi_rw & REQ_DISCARD) && -		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { -		/* Just ignore it */ -		bio_endio(bio, 0); -		return; -	} +		unsigned sectors = chunk_sects - +			(likely(is_power_of_2(chunk_sects)) +			 ? 
(sector & (chunk_sects-1)) +			 : sector_div(sector, chunk_sects)); -	generic_make_request(bio); -	return; - -bad_map: -	printk("md/raid0:%s: make_request bug: can't convert block across chunks" -	       " or bigger than %dk %llu %d\n", -	       mdname(mddev), chunk_sects / 2, -	       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2); +		if (sectors < bio_sectors(bio)) { +			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set); +			bio_chain(split, bio); +		} else { +			split = bio; +		} -	bio_io_error(bio); -	return; +		zone = find_zone(mddev->private, §or); +		tmp_dev = map_sector(mddev, zone, sector, §or); +		split->bi_bdev = tmp_dev->bdev; +		split->bi_iter.bi_sector = sector + zone->dev_start + +			tmp_dev->data_offset; + +		if (unlikely((split->bi_rw & REQ_DISCARD) && +			 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { +			/* Just ignore it */ +			bio_endio(split, 0); +		} else +			generic_make_request(split); +	} while (split != bio);  }  static void raid0_status(struct seq_file *seq, struct mddev *mddev) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index d60412c7f99..56e24c072b6 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -66,7 +66,8 @@   */  static int max_queued_requests = 1024; -static void allow_barrier(struct r1conf *conf); +static void allow_barrier(struct r1conf *conf, sector_t start_next_window, +			  sector_t bi_sector);  static void lower_barrier(struct r1conf *conf);  static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) @@ -84,16 +85,19 @@ static void r1bio_pool_free(void *r1_bio, void *data)  }  #define RESYNC_BLOCK_SIZE (64*1024) -//#define RESYNC_BLOCK_SIZE PAGE_SIZE +#define RESYNC_DEPTH 32  #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)  #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) -#define RESYNC_WINDOW (2048*1024) +#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH) +#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9) +#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)  static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)  {  	struct pool_info *pi = data;  	struct r1bio *r1_bio;  	struct bio *bio; +	int need_pages;  	int i, j;  	r1_bio = r1bio_pool_alloc(gfp_flags, pi); @@ -116,15 +120,15 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)  	 * RESYNC_PAGES for each bio.  	 
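Rather than bouncing single-page bios through bio_pair, raid0_make_request() now loops: at each step it works out how many sectors remain before the next chunk boundary, splits there with bio_split()/bio_chain(), and remaps the piece. The boundary computation on its own, as a small user-space function (a plain modulo stands in for sector_div()):

/* User-space sketch, not kernel code: sectors left before the next
 * chunk boundary, as computed in the reworked raid0 request loop. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static bool is_power_of_2(unsigned n)
{
	return n && !(n & (n - 1));
}

static unsigned sectors_to_chunk_boundary(sector_t sector, unsigned chunk_sects)
{
	if (is_power_of_2(chunk_sects))
		return chunk_sects - (unsigned)(sector & (chunk_sects - 1));
	return chunk_sects - (unsigned)(sector % chunk_sects);	/* general case */
}

int main(void)
{
	sector_t sector = 1000;
	unsigned chunk_sects = 128;	/* 64KiB chunks */
	unsigned bio_sectors = 200;
	unsigned room = sectors_to_chunk_boundary(sector, chunk_sects);

	if (room < bio_sectors)
		printf("split after %u sectors, chain the remaining %u\n",
		       room, bio_sectors - room);
	else
		printf("bio fits inside the chunk\n");
	return 0;
}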
*/  	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) -		j = pi->raid_disks; +		need_pages = pi->raid_disks;  	else -		j = 1; -	while(j--) { +		need_pages = 1; +	for (j = 0; j < need_pages; j++) {  		bio = r1_bio->bios[j];  		bio->bi_vcnt = RESYNC_PAGES;  		if (bio_alloc_pages(bio, gfp_flags)) -			goto out_free_bio; +			goto out_free_pages;  	}  	/* If not user-requests, copy the page pointers to all bios */  	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) { @@ -138,6 +142,14 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)  	return r1_bio; +out_free_pages: +	while (--j >= 0) { +		struct bio_vec *bv; + +		bio_for_each_segment_all(bv, r1_bio->bios[j], i) +			__free_page(bv->bv_page); +	} +  out_free_bio:  	while (++j < pi->raid_disks)  		bio_put(r1_bio->bios[j]); @@ -225,6 +237,8 @@ static void call_bio_endio(struct r1bio *r1_bio)  	struct bio *bio = r1_bio->master_bio;  	int done;  	struct r1conf *conf = r1_bio->mddev->private; +	sector_t start_next_window = r1_bio->start_next_window; +	sector_t bi_sector = bio->bi_iter.bi_sector;  	if (bio->bi_phys_segments) {  		unsigned long flags; @@ -232,6 +246,11 @@ static void call_bio_endio(struct r1bio *r1_bio)  		bio->bi_phys_segments--;  		done = (bio->bi_phys_segments == 0);  		spin_unlock_irqrestore(&conf->device_lock, flags); +		/* +		 * make_request() might be waiting for +		 * bi_phys_segments to decrease +		 */ +		wake_up(&conf->wait_barrier);  	} else  		done = 1; @@ -243,7 +262,7 @@ static void call_bio_endio(struct r1bio *r1_bio)  		 * Wake up any possible resync thread that waits for the device  		 * to go idle.  		 */ -		allow_barrier(conf); +		allow_barrier(conf, start_next_window, bi_sector);  	}  } @@ -255,9 +274,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio)  	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {  		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",  			 (bio_data_dir(bio) == WRITE) ? "write" : "read", -			 (unsigned long long) bio->bi_sector, -			 (unsigned long long) bio->bi_sector + -			 bio_sectors(bio) - 1); +			 (unsigned long long) bio->bi_iter.bi_sector, +			 (unsigned long long) bio_end_sector(bio) - 1);  		call_bio_endio(r1_bio);  	} @@ -456,9 +474,8 @@ static void raid1_end_write_request(struct bio *bio, int error)  				struct bio *mbio = r1_bio->master_bio;  				pr_debug("raid1: behind end write sectors"  					 " %llu-%llu\n", -					 (unsigned long long) mbio->bi_sector, -					 (unsigned long long) mbio->bi_sector + -					 bio_sectors(mbio) - 1); +					 (unsigned long long) mbio->bi_iter.bi_sector, +					 (unsigned long long) bio_end_sector(mbio) - 1);  				call_bio_endio(r1_bio);  			}  		} @@ -814,8 +831,6 @@ static void flush_pending_writes(struct r1conf *conf)   *    there is no normal IO happeing.  It must arrange to call   *    lower_barrier when the particular background IO completes.   */ -#define RESYNC_DEPTH 32 -  static void raise_barrier(struct r1conf *conf)  {  	spin_lock_irq(&conf->resync_lock); @@ -827,9 +842,19 @@ static void raise_barrier(struct r1conf *conf)  	/* block any new IO from starting */  	conf->barrier++; -	/* Now wait for all pending IO to complete */ +	/* For these conditions we must wait: +	 * A: while the array is in frozen state +	 * B: while barrier >= RESYNC_DEPTH, meaning resync reach +	 *    the max count which allowed. +	 * C: next_resync + RESYNC_SECTORS > start_next_window, meaning +	 *    next resync will reach to the window which normal bios are +	 *    handling. 
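The comment above lists the three conditions the resync path must now wait out before raising the barrier; restated as a pure predicate over the fields the following wait_event_lock_irq() reads (a user-space restatement, not kernel code):

/* User-space sketch: the raise_barrier() wait condition as a predicate. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

#define RESYNC_BLOCK_SIZE (64 * 1024)
#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)

static bool can_raise_barrier(bool array_frozen, int barrier,
			      sector_t start_next_window, sector_t next_resync)
{
	return !array_frozen &&					/* A: not frozen */
	       barrier < RESYNC_DEPTH &&			/* B: resync depth not maxed */
	       start_next_window >= next_resync + RESYNC_SECTORS; /* C: clear of the normal-IO window */
}

int main(void)
{
	/* A resync that would run into the normal-IO window must wait. */
	printf("%d\n", can_raise_barrier(false, 1, 1024, 1000));	/* 0: 1000+128 > 1024 */
	printf("%d\n", can_raise_barrier(false, 1, 2048, 1000));	/* 1 */
	return 0;
}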
+	 */  	wait_event_lock_irq(conf->wait_barrier, -			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH, +			    !conf->array_frozen && +			    conf->barrier < RESYNC_DEPTH && +			    (conf->start_next_window >= +			     conf->next_resync + RESYNC_SECTORS),  			    conf->resync_lock);  	spin_unlock_irq(&conf->resync_lock); @@ -845,10 +870,33 @@ static void lower_barrier(struct r1conf *conf)  	wake_up(&conf->wait_barrier);  } -static void wait_barrier(struct r1conf *conf) +static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)  { +	bool wait = false; + +	if (conf->array_frozen || !bio) +		wait = true; +	else if (conf->barrier && bio_data_dir(bio) == WRITE) { +		if (conf->next_resync < RESYNC_WINDOW_SECTORS) +			wait = true; +		else if ((conf->next_resync - RESYNC_WINDOW_SECTORS +				>= bio_end_sector(bio)) || +			 (conf->next_resync + NEXT_NORMALIO_DISTANCE +				<= bio->bi_iter.bi_sector)) +			wait = false; +		else +			wait = true; +	} + +	return wait; +} + +static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) +{ +	sector_t sector = 0; +  	spin_lock_irq(&conf->resync_lock); -	if (conf->barrier) { +	if (need_to_wait_for_sync(conf, bio)) {  		conf->nr_waiting++;  		/* Wait for the barrier to drop.  		 * However if there are already pending @@ -860,22 +908,66 @@ static void wait_barrier(struct r1conf *conf)  		 * count down.  		 */  		wait_event_lock_irq(conf->wait_barrier, -				    !conf->barrier || -				    (conf->nr_pending && +				    !conf->array_frozen && +				    (!conf->barrier || +				    ((conf->start_next_window < +				      conf->next_resync + RESYNC_SECTORS) &&  				     current->bio_list && -				     !bio_list_empty(current->bio_list)), +				     !bio_list_empty(current->bio_list))),  				    conf->resync_lock);  		conf->nr_waiting--;  	} + +	if (bio && bio_data_dir(bio) == WRITE) { +		if (conf->next_resync + NEXT_NORMALIO_DISTANCE +		    <= bio->bi_iter.bi_sector) { +			if (conf->start_next_window == MaxSector) +				conf->start_next_window = +					conf->next_resync + +					NEXT_NORMALIO_DISTANCE; + +			if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) +			    <= bio->bi_iter.bi_sector) +				conf->next_window_requests++; +			else +				conf->current_window_requests++; +			sector = conf->start_next_window; +		} +	} +  	conf->nr_pending++;  	spin_unlock_irq(&conf->resync_lock); +	return sector;  } -static void allow_barrier(struct r1conf *conf) +static void allow_barrier(struct r1conf *conf, sector_t start_next_window, +			  sector_t bi_sector)  {  	unsigned long flags; +  	spin_lock_irqsave(&conf->resync_lock, flags);  	conf->nr_pending--; +	if (start_next_window) { +		if (start_next_window == conf->start_next_window) { +			if (conf->start_next_window + NEXT_NORMALIO_DISTANCE +			    <= bi_sector) +				conf->next_window_requests--; +			else +				conf->current_window_requests--; +		} else +			conf->current_window_requests--; + +		if (!conf->current_window_requests) { +			if (conf->next_window_requests) { +				conf->current_window_requests = +					conf->next_window_requests; +				conf->next_window_requests = 0; +				conf->start_next_window += +					NEXT_NORMALIO_DISTANCE; +			} else +				conf->start_next_window = MaxSector; +		} +	}  	spin_unlock_irqrestore(&conf->resync_lock, flags);  	wake_up(&conf->wait_barrier);  } @@ -884,8 +976,7 @@ static void freeze_array(struct r1conf *conf, int extra)  {  	/* stop syncio and normal IO and wait for everything to  	 * go quite. 
-	 * We increment barrier and nr_waiting, and then -	 * wait until nr_pending match nr_queued+extra +	 * We wait until nr_pending match nr_queued+extra  	 * This is called in the context of one normal IO request  	 * that has failed. Thus any sync request that might be pending  	 * will be blocked by nr_pending, and we need to wait for @@ -895,8 +986,7 @@ static void freeze_array(struct r1conf *conf, int extra)  	 * we continue.  	 */  	spin_lock_irq(&conf->resync_lock); -	conf->barrier++; -	conf->nr_waiting++; +	conf->array_frozen = 1;  	wait_event_lock_irq_cmd(conf->wait_barrier,  				conf->nr_pending == conf->nr_queued+extra,  				conf->resync_lock, @@ -907,8 +997,7 @@ static void unfreeze_array(struct r1conf *conf)  {  	/* reverse the effect of the freeze */  	spin_lock_irq(&conf->resync_lock); -	conf->barrier--; -	conf->nr_waiting--; +	conf->array_frozen = 0;  	wake_up(&conf->wait_barrier);  	spin_unlock_irq(&conf->resync_lock);  } @@ -945,7 +1034,8 @@ do_sync_io:  		if (bvecs[i].bv_page)  			put_page(bvecs[i].bv_page);  	kfree(bvecs); -	pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); +	pr_debug("%dB behind alloc failed, doing sync I/O\n", +		 bio->bi_iter.bi_size);  }  struct raid1_plug_cb { @@ -1013,6 +1103,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)  	int first_clone;  	int sectors_handled;  	int max_sectors; +	sector_t start_next_window;  	/*  	 * Register the new request and wait if the reconstruction @@ -1024,7 +1115,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)  	if (bio_data_dir(bio) == WRITE &&  	    bio_end_sector(bio) > mddev->suspend_lo && -	    bio->bi_sector < mddev->suspend_hi) { +	    bio->bi_iter.bi_sector < mddev->suspend_hi) {  		/* As the suspend_* range is controlled by  		 * userspace, we want an interruptible  		 * wait. 
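freeze_array()/unfreeze_array() now flip a dedicated array_frozen flag and wait for in-flight IO to drain, instead of abusing the barrier and nr_waiting counters. A rough POSIX-threads model of that handshake; the field names mirror the kernel ones, everything else (including the pending-write flushing the kernel does while waiting) is simplified away:

/* User-space sketch, not kernel code: the array_frozen handshake. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_barrier = PTHREAD_COND_INITIALIZER;
static int array_frozen, pending, queued;

static void freeze_array(int extra)
{
	pthread_mutex_lock(&lock);
	array_frozen = 1;
	/* wait until in-flight IO drains down to what is parked on queues */
	while (pending != queued + extra)
		pthread_cond_wait(&wait_barrier, &lock);
	pthread_mutex_unlock(&lock);
}

static void unfreeze_array(void)
{
	pthread_mutex_lock(&lock);
	array_frozen = 0;
	pthread_cond_broadcast(&wait_barrier);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	freeze_array(0);	/* nothing pending, returns immediately */
	printf("frozen=%d\n", array_frozen);
	unfreeze_array();
	printf("frozen=%d\n", array_frozen);
	return 0;
}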
@@ -1035,14 +1126,14 @@ static void make_request(struct mddev *mddev, struct bio * bio)  			prepare_to_wait(&conf->wait_barrier,  					&w, TASK_INTERRUPTIBLE);  			if (bio_end_sector(bio) <= mddev->suspend_lo || -			    bio->bi_sector >= mddev->suspend_hi) +			    bio->bi_iter.bi_sector >= mddev->suspend_hi)  				break;  			schedule();  		}  		finish_wait(&conf->wait_barrier, &w);  	} -	wait_barrier(conf); +	start_next_window = wait_barrier(conf, bio);  	bitmap = mddev->bitmap; @@ -1057,7 +1148,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)  	r1_bio->sectors = bio_sectors(bio);  	r1_bio->state = 0;  	r1_bio->mddev = mddev; -	r1_bio->sector = bio->bi_sector; +	r1_bio->sector = bio->bi_iter.bi_sector;  	/* We might need to issue multiple reads to different  	 * devices if there are bad blocks around, so we keep @@ -1097,12 +1188,13 @@ read_again:  		r1_bio->read_disk = rdisk;  		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); -		md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector, -			    max_sectors); +		bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, +			 max_sectors);  		r1_bio->bios[rdisk] = read_bio; -		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; +		read_bio->bi_iter.bi_sector = r1_bio->sector + +			mirror->rdev->data_offset;  		read_bio->bi_bdev = mirror->rdev->bdev;  		read_bio->bi_end_io = raid1_end_read_request;  		read_bio->bi_rw = READ | do_sync; @@ -1114,7 +1206,7 @@ read_again:  			 */  			sectors_handled = (r1_bio->sector + max_sectors -					   - bio->bi_sector); +					   - bio->bi_iter.bi_sector);  			r1_bio->sectors = max_sectors;  			spin_lock_irq(&conf->device_lock);  			if (bio->bi_phys_segments == 0) @@ -1135,7 +1227,8 @@ read_again:  			r1_bio->sectors = bio_sectors(bio) - sectors_handled;  			r1_bio->state = 0;  			r1_bio->mddev = mddev; -			r1_bio->sector = bio->bi_sector + sectors_handled; +			r1_bio->sector = bio->bi_iter.bi_sector + +				sectors_handled;  			goto read_again;  		} else  			generic_make_request(read_bio); @@ -1163,6 +1256,7 @@ read_again:  	disks = conf->raid_disks * 2;   retry_write: +	r1_bio->start_next_window = start_next_window;  	blocked_rdev = NULL;  	rcu_read_lock();  	max_sectors = r1_bio->sectors; @@ -1231,14 +1325,24 @@ read_again:  	if (unlikely(blocked_rdev)) {  		/* Wait for this device to become unblocked */  		int j; +		sector_t old = start_next_window;  		for (j = 0; j < i; j++)  			if (r1_bio->bios[j])  				rdev_dec_pending(conf->mirrors[j].rdev, mddev);  		r1_bio->state = 0; -		allow_barrier(conf); +		allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);  		md_wait_for_blocked_rdev(blocked_rdev, mddev); -		wait_barrier(conf); +		start_next_window = wait_barrier(conf, bio); +		/* +		 * We must make sure the multi r1bios of bio have +		 * the same value of bi_phys_segments +		 */ +		if (bio->bi_phys_segments && old && +		    old != start_next_window) +			/* Wait for the former r1bio(s) to complete */ +			wait_event(conf->wait_barrier, +				   bio->bi_phys_segments == 1);  		goto retry_write;  	} @@ -1254,7 +1358,7 @@ read_again:  			bio->bi_phys_segments++;  		spin_unlock_irq(&conf->device_lock);  	} -	sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector; +	sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;  	atomic_set(&r1_bio->remaining, 1);  	atomic_set(&r1_bio->behind_remaining, 0); @@ -1266,7 +1370,7 @@ read_again:  			continue;  		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); -		md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, 
max_sectors); +		bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);  		if (first_clone) {  			/* do behind I/O ? @@ -1300,7 +1404,7 @@ read_again:  		r1_bio->bios[i] = mbio; -		mbio->bi_sector	= (r1_bio->sector + +		mbio->bi_iter.bi_sector	= (r1_bio->sector +  				   conf->mirrors[i].rdev->data_offset);  		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;  		mbio->bi_end_io	= raid1_end_write_request; @@ -1340,7 +1444,7 @@ read_again:  		r1_bio->sectors = bio_sectors(bio) - sectors_handled;  		r1_bio->state = 0;  		r1_bio->mddev = mddev; -		r1_bio->sector = bio->bi_sector + sectors_handled; +		r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;  		goto retry_write;  	} @@ -1438,11 +1542,14 @@ static void print_conf(struct r1conf *conf)  static void close_sync(struct r1conf *conf)  { -	wait_barrier(conf); -	allow_barrier(conf); +	wait_barrier(conf, NULL); +	allow_barrier(conf, 0, 0);  	mempool_destroy(conf->r1buf_pool);  	conf->r1buf_pool = NULL; + +	conf->next_resync = 0; +	conf->start_next_window = MaxSector;  }  static int raid1_spare_active(struct mddev *mddev) @@ -1479,6 +1586,7 @@ static int raid1_spare_active(struct mddev *mddev)  			}  		}  		if (rdev +		    && rdev->recovery_offset == MaxSector  		    && !test_bit(Faulty, &rdev->flags)  		    && !test_and_set_bit(In_sync, &rdev->flags)) {  			count++; @@ -1854,20 +1962,24 @@ static int process_checks(struct r1bio *r1_bio)  	for (i = 0; i < conf->raid_disks * 2; i++) {  		int j;  		int size; +		int uptodate;  		struct bio *b = r1_bio->bios[i];  		if (b->bi_end_io != end_sync_read)  			continue; -		/* fixup the bio for reuse */ +		/* fixup the bio for reuse, but preserve BIO_UPTODATE */ +		uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);  		bio_reset(b); +		if (!uptodate) +			clear_bit(BIO_UPTODATE, &b->bi_flags);  		b->bi_vcnt = vcnt; -		b->bi_size = r1_bio->sectors << 9; -		b->bi_sector = r1_bio->sector + +		b->bi_iter.bi_size = r1_bio->sectors << 9; +		b->bi_iter.bi_sector = r1_bio->sector +  			conf->mirrors[i].rdev->data_offset;  		b->bi_bdev = conf->mirrors[i].rdev->bdev;  		b->bi_end_io = end_sync_read;  		b->bi_private = r1_bio; -		size = b->bi_size; +		size = b->bi_iter.bi_size;  		for (j = 0; j < vcnt ; j++) {  			struct bio_vec *bi;  			bi = &b->bi_io_vec[j]; @@ -1891,11 +2003,14 @@ static int process_checks(struct r1bio *r1_bio)  		int j;  		struct bio *pbio = r1_bio->bios[primary];  		struct bio *sbio = r1_bio->bios[i]; +		int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);  		if (sbio->bi_end_io != end_sync_read)  			continue; +		/* Now we can 'fixup' the BIO_UPTODATE flag */ +		set_bit(BIO_UPTODATE, &sbio->bi_flags); -		if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) { +		if (uptodate) {  			for (j = vcnt; j-- ; ) {  				struct page *p, *s;  				p = pbio->bi_io_vec[j].bv_page; @@ -1910,7 +2025,7 @@ static int process_checks(struct r1bio *r1_bio)  		if (j >= 0)  			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);  		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) -			      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) { +			      && uptodate)) {  			/* No need to write to this device. 
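process_checks() now saves BIO_UPTODATE before bio_reset() and puts it back afterwards, since the reset would otherwise make a failed read look clean. The save-and-restore pattern in miniature, with a plain flags word standing in for bio->bi_flags (that a reset leaves the "uptodate" bit set is an assumption modelling bio_reset()'s behaviour):

/* User-space sketch, not kernel code: preserve one status bit across a
 * wholesale reset of the structure. */
#include <stdbool.h>
#include <stdio.h>

#define FLAG_UPTODATE (1u << 0)

struct fake_bio {
	unsigned flags;
	/* ...other state that reset() is expected to clear... */
};

static void reset(struct fake_bio *b)
{
	b->flags = FLAG_UPTODATE;	/* reset marks the bio uptodate by default */
}

static void fixup_for_reuse(struct fake_bio *b)
{
	bool uptodate = b->flags & FLAG_UPTODATE;

	reset(b);
	if (!uptodate)			/* restore the earlier read result */
		b->flags &= ~FLAG_UPTODATE;
}

int main(void)
{
	struct fake_bio b = { 0 };	/* a failed read: not uptodate */

	fixup_for_reuse(&b);
	printf("uptodate after reuse fixup: %u\n", b.flags & FLAG_UPTODATE);
	return 0;
}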
*/  			sbio->bi_end_io = NULL;  			rdev_dec_pending(conf->mirrors[i].rdev, mddev); @@ -2122,11 +2237,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)  		}  		wbio->bi_rw = WRITE; -		wbio->bi_sector = r1_bio->sector; -		wbio->bi_size = r1_bio->sectors << 9; +		wbio->bi_iter.bi_sector = r1_bio->sector; +		wbio->bi_iter.bi_size = r1_bio->sectors << 9; -		md_trim_bio(wbio, sector - r1_bio->sector, sectors); -		wbio->bi_sector += rdev->data_offset; +		bio_trim(wbio, sector - r1_bio->sector, sectors); +		wbio->bi_iter.bi_sector += rdev->data_offset;  		wbio->bi_bdev = rdev->bdev;  		if (submit_bio_wait(WRITE, wbio) == 0)  			/* failure! */ @@ -2240,7 +2355,8 @@ read_more:  		}  		r1_bio->read_disk = disk;  		bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); -		md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors); +		bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector, +			 max_sectors);  		r1_bio->bios[r1_bio->read_disk] = bio;  		rdev = conf->mirrors[disk].rdev;  		printk_ratelimited(KERN_ERR @@ -2249,7 +2365,7 @@ read_more:  				   mdname(mddev),  				   (unsigned long long)r1_bio->sector,  				   bdevname(rdev->bdev, b)); -		bio->bi_sector = r1_bio->sector + rdev->data_offset; +		bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;  		bio->bi_bdev = rdev->bdev;  		bio->bi_end_io = raid1_end_read_request;  		bio->bi_rw = READ | do_sync; @@ -2258,7 +2374,7 @@ read_more:  			/* Drat - have to split this up more */  			struct bio *mbio = r1_bio->master_bio;  			int sectors_handled = (r1_bio->sector + max_sectors -					       - mbio->bi_sector); +					       - mbio->bi_iter.bi_sector);  			r1_bio->sectors = max_sectors;  			spin_lock_irq(&conf->device_lock);  			if (mbio->bi_phys_segments == 0) @@ -2276,7 +2392,8 @@ read_more:  			r1_bio->state = 0;  			set_bit(R1BIO_ReadError, &r1_bio->state);  			r1_bio->mddev = mddev; -			r1_bio->sector = mbio->bi_sector + sectors_handled; +			r1_bio->sector = mbio->bi_iter.bi_sector + +				sectors_handled;  			goto read_more;  		} else @@ -2500,7 +2617,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp  		}  		if (bio->bi_end_io) {  			atomic_inc(&rdev->nr_pending); -			bio->bi_sector = sector_nr + rdev->data_offset; +			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;  			bio->bi_bdev = rdev->bdev;  			bio->bi_private = r1_bio;  		} @@ -2600,7 +2717,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp  							continue;  						/* remove last page from this bio */  						bio->bi_vcnt--; -						bio->bi_size -= len; +						bio->bi_iter.bi_size -= len;  						bio->bi_flags &= ~(1<< BIO_SEG_VALID);  					}  					goto bio_full; @@ -2713,6 +2830,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)  	conf->pending_count = 0;  	conf->recovery_disabled = mddev->recovery_disabled - 1; +	conf->start_next_window = MaxSector; +	conf->current_window_requests = conf->next_window_requests = 0; +  	err = -EIO;  	for (i = 0; i < conf->raid_disks * 2; i++) { @@ -2870,8 +2990,8 @@ static int stop(struct mddev *mddev)  			   atomic_read(&bitmap->behind_writes) == 0);  	} -	raise_barrier(conf); -	lower_barrier(conf); +	freeze_array(conf, 0); +	unfreeze_array(conf);  	md_unregister_thread(&mddev->thread);  	if (conf->r1bio_pool) @@ -3030,10 +3150,10 @@ static void raid1_quiesce(struct mddev *mddev, int state)  		wake_up(&conf->wait_barrier);  		break;  	case 1: -		raise_barrier(conf); +		freeze_array(conf, 0);  		break;  	case 0: -		lower_barrier(conf); +		
unfreeze_array(conf);  		break;  	}  } @@ -3050,7 +3170,8 @@ static void *raid1_takeover(struct mddev *mddev)  		mddev->new_chunk_sectors = 0;  		conf = setup_conf(mddev);  		if (!IS_ERR(conf)) -			conf->barrier = 1; +			/* Array must appear to be quiesced */ +			conf->array_frozen = 1;  		return conf;  	}  	return ERR_PTR(-EINVAL); diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index 0ff3715fb7e..9bebca7bff2 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -41,6 +41,19 @@ struct r1conf {  	 */  	sector_t		next_resync; +	/* When raid1 starts resync, we divide array into four partitions +	 * |---------|--------------|---------------------|-------------| +	 *        next_resync   start_next_window       end_window +	 * start_next_window = next_resync + NEXT_NORMALIO_DISTANCE +	 * end_window = start_next_window + NEXT_NORMALIO_DISTANCE +	 * current_window_requests means the count of normalIO between +	 *   start_next_window and end_window. +	 * next_window_requests means the count of normalIO after end_window. +	 * */ +	sector_t		start_next_window; +	int			current_window_requests; +	int			next_window_requests; +  	spinlock_t		device_lock;  	/* list of 'struct r1bio' that need to be processed by raid1d, @@ -65,6 +78,7 @@ struct r1conf {  	int			nr_waiting;  	int			nr_queued;  	int			barrier; +	int			array_frozen;  	/* Set to 1 if a full sync is needed, (fresh device added).  	 * Cleared when a sync completes. @@ -111,6 +125,7 @@ struct r1bio {  						 * in this BehindIO request  						 */  	sector_t		sector; +	sector_t		start_next_window;  	int			sectors;  	unsigned long		state;  	struct mddev		*mddev; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index df7b0a06b0e..cb882aae9e2 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1152,14 +1152,12 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)  	kfree(plug);  } -static void make_request(struct mddev *mddev, struct bio * bio) +static void __make_request(struct mddev *mddev, struct bio *bio)  {  	struct r10conf *conf = mddev->private;  	struct r10bio *r10_bio;  	struct bio *read_bio;  	int i; -	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); -	int chunk_sects = chunk_mask + 1;  	const int rw = bio_data_dir(bio);  	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);  	const unsigned long do_fua = (bio->bi_rw & REQ_FUA); @@ -1174,61 +1172,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)  	int max_sectors;  	int sectors; -	if (unlikely(bio->bi_rw & REQ_FLUSH)) { -		md_flush_request(mddev, bio); -		return; -	} - -	/* If this request crosses a chunk boundary, we need to -	 * split it.  This will only happen for 1 PAGE (or less) requests. -	 */ -	if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio) -		     > chunk_sects -		     && (conf->geo.near_copies < conf->geo.raid_disks -			 || conf->prev.near_copies < conf->prev.raid_disks))) { -		struct bio_pair *bp; -		/* Sanity check -- queue functions should prevent this happening */ -		if (bio_segments(bio) > 1) -			goto bad_map; -		/* This is a one page bio that upper layers -		 * refuse to split for us, so we need to split it. -		 */ -		bp = bio_split(bio, -			       chunk_sects - (bio->bi_sector & (chunk_sects - 1)) ); - -		/* Each of these 'make_request' calls will call 'wait_barrier'. 
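The r1conf comment above defines the resync windows; the classification below reduces that bookkeeping to the question wait_barrier() asks of a write bio's start sector, under the simplifying assumption that start_next_window sits exactly NEXT_NORMALIO_DISTANCE past next_resync (a user-space model, not kernel code):

/* User-space sketch: which window counter, if any, a write lands in. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

#define RESYNC_BLOCK_SIZE (64 * 1024)
#define RESYNC_DEPTH 32
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)

static const char *classify_write(sector_t bi_sector, sector_t next_resync)
{
	sector_t start_next_window = next_resync + NEXT_NORMALIO_DISTANCE;
	sector_t end_window = start_next_window + NEXT_NORMALIO_DISTANCE;

	if (bi_sector < start_next_window)
		return "below start_next_window: not window-counted";
	if (bi_sector < end_window)
		return "counted in current_window_requests";
	return "counted in next_window_requests";
}

int main(void)
{
	sector_t next_resync = 100000;

	printf("%s\n", classify_write(100100, next_resync));
	printf("%s\n", classify_write(115000, next_resync));
	printf("%s\n", classify_write(130000, next_resync));
	return 0;
}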
-		 * If the first succeeds but the second blocks due to the resync -		 * thread raising the barrier, we will deadlock because the -		 * IO to the underlying device will be queued in generic_make_request -		 * and will never complete, so will never reduce nr_pending. -		 * So increment nr_waiting here so no new raise_barriers will -		 * succeed, and so the second wait_barrier cannot block. -		 */ -		spin_lock_irq(&conf->resync_lock); -		conf->nr_waiting++; -		spin_unlock_irq(&conf->resync_lock); - -		make_request(mddev, &bp->bio1); -		make_request(mddev, &bp->bio2); - -		spin_lock_irq(&conf->resync_lock); -		conf->nr_waiting--; -		wake_up(&conf->wait_barrier); -		spin_unlock_irq(&conf->resync_lock); - -		bio_pair_release(bp); -		return; -	bad_map: -		printk("md/raid10:%s: make_request bug: can't convert block across chunks" -		       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2, -		       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2); - -		bio_io_error(bio); -		return; -	} - -	md_write_start(mddev, bio); -  	/*  	 * Register the new request and wait if the reconstruction  	 * thread has put up a bar for new requests. @@ -1238,24 +1181,25 @@ static void make_request(struct mddev *mddev, struct bio * bio)  	sectors = bio_sectors(bio);  	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && -	    bio->bi_sector < conf->reshape_progress && -	    bio->bi_sector + sectors > conf->reshape_progress) { +	    bio->bi_iter.bi_sector < conf->reshape_progress && +	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {  		/* IO spans the reshape position.  Need to wait for  		 * reshape to pass  		 */  		allow_barrier(conf);  		wait_event(conf->wait_barrier, -			   conf->reshape_progress <= bio->bi_sector || -			   conf->reshape_progress >= bio->bi_sector + sectors); +			   conf->reshape_progress <= bio->bi_iter.bi_sector || +			   conf->reshape_progress >= bio->bi_iter.bi_sector + +			   sectors);  		wait_barrier(conf);  	}  	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&  	    bio_data_dir(bio) == WRITE &&  	    (mddev->reshape_backwards -	     ? (bio->bi_sector < conf->reshape_safe && -		bio->bi_sector + sectors > conf->reshape_progress) -	     : (bio->bi_sector + sectors > conf->reshape_safe && -		bio->bi_sector < conf->reshape_progress))) { +	     ? 
(bio->bi_iter.bi_sector < conf->reshape_safe && +		bio->bi_iter.bi_sector + sectors > conf->reshape_progress) +	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && +		bio->bi_iter.bi_sector < conf->reshape_progress))) {  		/* Need to update reshape_position in metadata */  		mddev->reshape_position = conf->reshape_progress;  		set_bit(MD_CHANGE_DEVS, &mddev->flags); @@ -1273,7 +1217,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)  	r10_bio->sectors = sectors;  	r10_bio->mddev = mddev; -	r10_bio->sector = bio->bi_sector; +	r10_bio->sector = bio->bi_iter.bi_sector;  	r10_bio->state = 0;  	/* We might need to issue multiple reads to different @@ -1302,13 +1246,13 @@ read_again:  		slot = r10_bio->read_slot;  		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); -		md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector, -			    max_sectors); +		bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, +			 max_sectors);  		r10_bio->devs[slot].bio = read_bio;  		r10_bio->devs[slot].rdev = rdev; -		read_bio->bi_sector = r10_bio->devs[slot].addr + +		read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +  			choose_data_offset(r10_bio, rdev);  		read_bio->bi_bdev = rdev->bdev;  		read_bio->bi_end_io = raid10_end_read_request; @@ -1319,15 +1263,15 @@ read_again:  			/* Could not read all from this device, so we will  			 * need another r10_bio.  			 */ -			sectors_handled = (r10_bio->sectors + max_sectors -					   - bio->bi_sector); +			sectors_handled = (r10_bio->sector + max_sectors +					   - bio->bi_iter.bi_sector);  			r10_bio->sectors = max_sectors;  			spin_lock_irq(&conf->device_lock);  			if (bio->bi_phys_segments == 0)  				bio->bi_phys_segments = 2;  			else  				bio->bi_phys_segments++; -			spin_unlock(&conf->device_lock); +			spin_unlock_irq(&conf->device_lock);  			/* Cannot call generic_make_request directly  			 * as that will be queued in __generic_make_request  			 * and subsequent mempool_alloc might block @@ -1341,7 +1285,8 @@ read_again:  			r10_bio->sectors = bio_sectors(bio) - sectors_handled;  			r10_bio->state = 0;  			r10_bio->mddev = mddev; -			r10_bio->sector = bio->bi_sector + sectors_handled; +			r10_bio->sector = bio->bi_iter.bi_sector + +				sectors_handled;  			goto read_again;  		} else  			generic_make_request(read_bio); @@ -1499,7 +1444,8 @@ retry_write:  			bio->bi_phys_segments++;  		spin_unlock_irq(&conf->device_lock);  	} -	sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector; +	sectors_handled = r10_bio->sector + max_sectors - +		bio->bi_iter.bi_sector;  	atomic_set(&r10_bio->remaining, 1);  	bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); @@ -1510,11 +1456,11 @@ retry_write:  		if (r10_bio->devs[i].bio) {  			struct md_rdev *rdev = conf->mirrors[d].rdev;  			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); -			md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, -				    max_sectors); +			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, +				 max_sectors);  			r10_bio->devs[i].bio = mbio; -			mbio->bi_sector	= (r10_bio->devs[i].addr+ +			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr+  					   choose_data_offset(r10_bio,  							      rdev));  			mbio->bi_bdev = rdev->bdev; @@ -1553,11 +1499,11 @@ retry_write:  				rdev = conf->mirrors[d].rdev;  			}  			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); -			md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, -				    max_sectors); +			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, +				 max_sectors);  			
r10_bio->devs[i].repl_bio = mbio; -			mbio->bi_sector	= (r10_bio->devs[i].addr + +			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr +  					   choose_data_offset(  						   r10_bio, rdev));  			mbio->bi_bdev = rdev->bdev; @@ -1591,11 +1537,51 @@ retry_write:  		r10_bio->sectors = bio_sectors(bio) - sectors_handled;  		r10_bio->mddev = mddev; -		r10_bio->sector = bio->bi_sector + sectors_handled; +		r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;  		r10_bio->state = 0;  		goto retry_write;  	}  	one_write_done(r10_bio); +} + +static void make_request(struct mddev *mddev, struct bio *bio) +{ +	struct r10conf *conf = mddev->private; +	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); +	int chunk_sects = chunk_mask + 1; + +	struct bio *split; + +	if (unlikely(bio->bi_rw & REQ_FLUSH)) { +		md_flush_request(mddev, bio); +		return; +	} + +	md_write_start(mddev, bio); + + +	do { + +		/* +		 * If this request crosses a chunk boundary, we need to split +		 * it. +		 */ +		if (unlikely((bio->bi_iter.bi_sector & chunk_mask) + +			     bio_sectors(bio) > chunk_sects +			     && (conf->geo.near_copies < conf->geo.raid_disks +				 || conf->prev.near_copies < +				 conf->prev.raid_disks))) { +			split = bio_split(bio, chunk_sects - +					  (bio->bi_iter.bi_sector & +					   (chunk_sects - 1)), +					  GFP_NOIO, fs_bio_set); +			bio_chain(split, bio); +		} else { +			split = bio; +		} + +		__make_request(mddev, split); +	} while (split != bio);  	/* In case raid10d snuck in to freeze_array */  	wake_up(&conf->wait_barrier); @@ -1782,6 +1768,7 @@ static int raid10_spare_active(struct mddev *mddev)  			}  			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);  		} else if (tmp->rdev +			   && tmp->rdev->recovery_offset == MaxSector  			   && !test_bit(Faulty, &tmp->rdev->flags)  			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {  			count++; @@ -2123,10 +2110,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)  		bio_reset(tbio);  		tbio->bi_vcnt = vcnt; -		tbio->bi_size = r10_bio->sectors << 9; +		tbio->bi_iter.bi_size = r10_bio->sectors << 9;  		tbio->bi_rw = WRITE;  		tbio->bi_private = r10_bio; -		tbio->bi_sector = r10_bio->devs[i].addr; +		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;  		for (j=0; j < vcnt ; j++) {  			tbio->bi_io_vec[j].bv_offset = 0; @@ -2143,7 +2130,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)  		atomic_inc(&r10_bio->remaining);  		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); -		tbio->bi_sector += conf->mirrors[d].rdev->data_offset; +		tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;  		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;  		generic_make_request(tbio);  	} @@ -2613,8 +2600,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)  			sectors = sect_to_write;  		/* Write at 'sector' for 'sectors' */  		wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); -		md_trim_bio(wbio, sector - bio->bi_sector, sectors); -		wbio->bi_sector = (r10_bio->devs[i].addr+ +		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); +		wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+  				   choose_data_offset(r10_bio, rdev) +  				   (sector - r10_bio->sector));  		wbio->bi_bdev = rdev->bdev; @@ -2686,12 +2673,10 @@ read_more:  		(unsigned long long)r10_bio->sector);  	bio = bio_clone_mddev(r10_bio->master_bio,  			      GFP_NOIO, mddev); -	md_trim_bio(bio, -		    r10_bio->sector - bio->bi_sector, -		    max_sectors); +	bio_trim(bio, r10_bio->sector 
- bio->bi_iter.bi_sector, max_sectors);  	r10_bio->devs[slot].bio = bio;  	r10_bio->devs[slot].rdev = rdev; -	bio->bi_sector = r10_bio->devs[slot].addr +	bio->bi_iter.bi_sector = r10_bio->devs[slot].addr  		+ choose_data_offset(r10_bio, rdev);  	bio->bi_bdev = rdev->bdev;  	bio->bi_rw = READ | do_sync; @@ -2702,7 +2687,7 @@ read_more:  		struct bio *mbio = r10_bio->master_bio;  		int sectors_handled =  			r10_bio->sector + max_sectors -			- mbio->bi_sector; +			- mbio->bi_iter.bi_sector;  		r10_bio->sectors = max_sectors;  		spin_lock_irq(&conf->device_lock);  		if (mbio->bi_phys_segments == 0) @@ -2720,7 +2705,7 @@ read_more:  		set_bit(R10BIO_ReadError,  			&r10_bio->state);  		r10_bio->mddev = mddev; -		r10_bio->sector = mbio->bi_sector +		r10_bio->sector = mbio->bi_iter.bi_sector  			+ sectors_handled;  		goto read_more; @@ -3158,7 +3143,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,  				bio->bi_end_io = end_sync_read;  				bio->bi_rw = READ;  				from_addr = r10_bio->devs[j].addr; -				bio->bi_sector = from_addr + rdev->data_offset; +				bio->bi_iter.bi_sector = from_addr + +					rdev->data_offset;  				bio->bi_bdev = rdev->bdev;  				atomic_inc(&rdev->nr_pending);  				/* and we write to 'i' (if not in_sync) */ @@ -3182,7 +3168,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,  					bio->bi_private = r10_bio;  					bio->bi_end_io = end_sync_write;  					bio->bi_rw = WRITE; -					bio->bi_sector = to_addr +					bio->bi_iter.bi_sector = to_addr  						+ rdev->data_offset;  					bio->bi_bdev = rdev->bdev;  					atomic_inc(&r10_bio->remaining); @@ -3211,7 +3197,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,  				bio->bi_private = r10_bio;  				bio->bi_end_io = end_sync_write;  				bio->bi_rw = WRITE; -				bio->bi_sector = to_addr + rdev->data_offset; +				bio->bi_iter.bi_sector = to_addr + +					rdev->data_offset;  				bio->bi_bdev = rdev->bdev;  				atomic_inc(&r10_bio->remaining);  				break; @@ -3219,10 +3206,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,  			if (j == conf->copies) {  				/* Cannot recover, so abort the recovery or  				 * record a bad block */ -				put_buf(r10_bio); -				if (rb2) -					atomic_dec(&rb2->remaining); -				r10_bio = rb2;  				if (any_working) {  					/* problem is that there are bad blocks  					 * on other device(s) @@ -3254,6 +3237,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,  					mirror->recovery_disabled  						= mddev->recovery_disabled;  				} +				put_buf(r10_bio); +				if (rb2) +					atomic_dec(&rb2->remaining); +				r10_bio = rb2;  				break;  			}  		} @@ -3329,7 +3316,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,  			bio->bi_private = r10_bio;  			bio->bi_end_io = end_sync_read;  			bio->bi_rw = READ; -			bio->bi_sector = sector + +			bio->bi_iter.bi_sector = sector +  				conf->mirrors[d].rdev->data_offset;  			bio->bi_bdev = conf->mirrors[d].rdev->bdev;  			count++; @@ -3351,7 +3338,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,  			bio->bi_private = r10_bio;  			bio->bi_end_io = end_sync_write;  			bio->bi_rw = WRITE; -			bio->bi_sector = sector + +			bio->bi_iter.bi_sector = sector +  				conf->mirrors[d].replacement->data_offset;  			bio->bi_bdev = conf->mirrors[d].replacement->bdev;  			count++; @@ -3398,7 +3385,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,  			     bio2 = bio2->bi_next) {  				/* remove last page 
from this bio */  				bio2->bi_vcnt--; -				bio2->bi_size -= len; +				bio2->bi_iter.bi_size -= len;  				bio2->bi_flags &= ~(1<< BIO_SEG_VALID);  			}  			goto bio_full; @@ -3748,7 +3735,8 @@ static int run(struct mddev *mddev)  		    !test_bit(In_sync, &disk->rdev->flags)) {  			disk->head_position = 0;  			mddev->degraded++; -			if (disk->rdev) +			if (disk->rdev && +			    disk->rdev->saved_raid_disk < 0)  				conf->fullsync = 1;  		}  		disk->recovery_disabled = mddev->recovery_disabled - 1; @@ -4385,7 +4373,11 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,  		set_bit(MD_CHANGE_DEVS, &mddev->flags);  		md_wakeup_thread(mddev->thread);  		wait_event(mddev->sb_wait, mddev->flags == 0 || -			   kthread_should_stop()); +			   test_bit(MD_RECOVERY_INTR, &mddev->recovery)); +		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { +			allow_barrier(conf); +			return sectors_done; +		}  		conf->reshape_safe = mddev->reshape_position;  		allow_barrier(conf);  	} @@ -4414,7 +4406,7 @@ read_more:  	read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);  	read_bio->bi_bdev = rdev->bdev; -	read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr +	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr  			       + rdev->data_offset);  	read_bio->bi_private = r10_bio;  	read_bio->bi_end_io = end_sync_read; @@ -4422,7 +4414,7 @@ read_more:  	read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);  	read_bio->bi_flags |= 1 << BIO_UPTODATE;  	read_bio->bi_vcnt = 0; -	read_bio->bi_size = 0; +	read_bio->bi_iter.bi_size = 0;  	r10_bio->master_bio = read_bio;  	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; @@ -4448,7 +4440,8 @@ read_more:  		bio_reset(b);  		b->bi_bdev = rdev2->bdev; -		b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset; +		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + +			rdev2->new_data_offset;  		b->bi_private = r10_bio;  		b->bi_end_io = end_reshape_write;  		b->bi_rw = WRITE; @@ -4475,7 +4468,7 @@ read_more:  			     bio2 = bio2->bi_next) {  				/* Remove last page from this bio */  				bio2->bi_vcnt--; -				bio2->bi_size -= len; +				bio2->bi_iter.bi_size -= len;  				bio2->bi_flags &= ~(1<<BIO_SEG_VALID);  			}  			goto bio_full; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7ff4f252ca1..6234b2e8458 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -85,6 +85,42 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)  	return &conf->stripe_hashtbl[hash];  } +static inline int stripe_hash_locks_hash(sector_t sect) +{ +	return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK; +} + +static inline void lock_device_hash_lock(struct r5conf *conf, int hash) +{ +	spin_lock_irq(conf->hash_locks + hash); +	spin_lock(&conf->device_lock); +} + +static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) +{ +	spin_unlock(&conf->device_lock); +	spin_unlock_irq(conf->hash_locks + hash); +} + +static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) +{ +	int i; +	local_irq_disable(); +	spin_lock(conf->hash_locks); +	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) +		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); +	spin_lock(&conf->device_lock); +} + +static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) +{ +	int i; +	spin_unlock(&conf->device_lock); +	for (i = NR_STRIPE_HASH_LOCKS; i; i--) +		spin_unlock(conf->hash_locks + i - 1); +	local_irq_enable(); +} +  /* bio's attached to a stripe+device for I/O are linked together 
in bi_sector   * order without overlap.  There may be several bio's per stripe+device, and   * a bio could span several devices. @@ -97,7 +133,7 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)  static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)  {  	int sectors = bio_sectors(bio); -	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS) +	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)  		return bio->bi_next;  	else  		return NULL; @@ -189,7 +225,7 @@ static void return_io(struct bio *return_bi)  		return_bi = bi->bi_next;  		bi->bi_next = NULL; -		bi->bi_size = 0; +		bi->bi_iter.bi_size = 0;  		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),  					 bi, 0);  		bio_endio(bi, 0); @@ -249,15 +285,19 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh)  	}  } -static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh) +static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, +			      struct list_head *temp_inactive_list)  {  	BUG_ON(!list_empty(&sh->lru));  	BUG_ON(atomic_read(&conf->active_stripes)==0);  	if (test_bit(STRIPE_HANDLE, &sh->state)) {  		if (test_bit(STRIPE_DELAYED, &sh->state) && -		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) +		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {  			list_add_tail(&sh->lru, &conf->delayed_list); -		else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && +			if (atomic_read(&conf->preread_active_stripes) +			    < IO_THRESHOLD) +				md_wakeup_thread(conf->mddev->thread); +		} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&  			   sh->bm_seq - conf->seq_write > 0)  			list_add_tail(&sh->lru, &conf->bitmap_list);  		else { @@ -278,37 +318,68 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)  			    < IO_THRESHOLD)  				md_wakeup_thread(conf->mddev->thread);  		atomic_dec(&conf->active_stripes); -		if (!test_bit(STRIPE_EXPANDING, &sh->state)) { -			list_add_tail(&sh->lru, &conf->inactive_list); -			wake_up(&conf->wait_for_stripe); -			if (conf->retry_read_aligned) -				md_wakeup_thread(conf->mddev->thread); -		} +		if (!test_bit(STRIPE_EXPANDING, &sh->state)) +			list_add_tail(&sh->lru, temp_inactive_list);  	}  } -static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) +static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, +			     struct list_head *temp_inactive_list)  {  	if (atomic_dec_and_test(&sh->count)) -		do_release_stripe(conf, sh); +		do_release_stripe(conf, sh, temp_inactive_list);  } -static struct llist_node *llist_reverse_order(struct llist_node *head) +/* + * @hash could be NR_STRIPE_HASH_LOCKS, then we have a list of inactive_list + * + * Be careful: Only one task can add/delete stripes from temp_inactive_list at + * given time. Adding stripes only takes device lock, while deleting stripes + * only takes hash lock. 
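Stripe handling now stripes its locking by hashing the stripe sector, and the "lock everything" paths take the per-hash locks in one fixed order before device_lock so they cannot deadlock against callers holding a single hash lock. A POSIX-threads sketch of that scheme; the shift value and lock count below are assumptions chosen for illustration, not values taken from this hunk:

/* User-space sketch, not kernel code: lock striping keyed by sector. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_STRIPE_HASH_LOCKS 8		/* assumption */
#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
#define STRIPE_SHIFT 3			/* assumption: page-sized stripe units */

typedef uint64_t sector_t;

static pthread_mutex_t hash_locks[NR_STRIPE_HASH_LOCKS];

static int stripe_hash_locks_hash(sector_t sect)
{
	return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
}

/* Whole-array paths take every per-hash lock in ascending order, so they
 * cannot deadlock against per-stripe users holding a single hash lock. */
static void lock_all_hash_locks(void)
{
	for (int i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
		pthread_mutex_lock(&hash_locks[i]);
}

static void unlock_all_hash_locks(void)
{
	for (int i = NR_STRIPE_HASH_LOCKS; i; i--)
		pthread_mutex_unlock(&hash_locks[i - 1]);
}

int main(void)
{
	sector_t sect = 1024;
	int hash;

	for (int i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
		pthread_mutex_init(&hash_locks[i], NULL);

	hash = stripe_hash_locks_hash(sect);
	pthread_mutex_lock(&hash_locks[hash]);	/* per-stripe work */
	pthread_mutex_unlock(&hash_locks[hash]);

	lock_all_hash_locks();			/* whole-array work */
	unlock_all_hash_locks();
	return 0;
}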
+ */ +static void release_inactive_stripe_list(struct r5conf *conf, +					 struct list_head *temp_inactive_list, +					 int hash)  { -	struct llist_node *new_head = NULL; +	int size; +	bool do_wakeup = false; +	unsigned long flags; -	while (head) { -		struct llist_node *tmp = head; -		head = head->next; -		tmp->next = new_head; -		new_head = tmp; +	if (hash == NR_STRIPE_HASH_LOCKS) { +		size = NR_STRIPE_HASH_LOCKS; +		hash = NR_STRIPE_HASH_LOCKS - 1; +	} else +		size = 1; +	while (size) { +		struct list_head *list = &temp_inactive_list[size - 1]; + +		/* +		 * We don't hold any lock here yet, get_active_stripe() might +		 * remove stripes from the list +		 */ +		if (!list_empty_careful(list)) { +			spin_lock_irqsave(conf->hash_locks + hash, flags); +			if (list_empty(conf->inactive_list + hash) && +			    !list_empty(list)) +				atomic_dec(&conf->empty_inactive_list_nr); +			list_splice_tail_init(list, conf->inactive_list + hash); +			do_wakeup = true; +			spin_unlock_irqrestore(conf->hash_locks + hash, flags); +		} +		size--; +		hash--;  	} -	return new_head; +	if (do_wakeup) { +		wake_up(&conf->wait_for_stripe); +		if (conf->retry_read_aligned) +			md_wakeup_thread(conf->mddev->thread); +	}  }  /* should hold conf->device_lock already */ -static int release_stripe_list(struct r5conf *conf) +static int release_stripe_list(struct r5conf *conf, +			       struct list_head *temp_inactive_list)  {  	struct stripe_head *sh;  	int count = 0; @@ -317,6 +388,8 @@ static int release_stripe_list(struct r5conf *conf)  	head = llist_del_all(&conf->released_stripes);  	head = llist_reverse_order(head);  	while (head) { +		int hash; +  		sh = llist_entry(head, struct stripe_head, release_list);  		head = llist_next(head);  		/* sh could be readded after STRIPE_ON_RELEASE_LIST is cleard */ @@ -327,7 +400,8 @@ static int release_stripe_list(struct r5conf *conf)  		 * again, the count is always > 1. This is true for  		 * STRIPE_ON_UNPLUG_LIST bit too.  		 */ -		__release_stripe(conf, sh); +		hash = sh->hash_lock_index; +		__release_stripe(conf, sh, &temp_inactive_list[hash]);  		count++;  	} @@ -338,9 +412,17 @@ static void release_stripe(struct stripe_head *sh)  {  	struct r5conf *conf = sh->raid_conf;  	unsigned long flags; +	struct list_head list; +	int hash;  	bool wakeup; -	if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) +	/* Avoid release_list until the last reference. +	 */ +	if (atomic_add_unless(&sh->count, -1, 1)) +		return; + +	if (unlikely(!conf->mddev->thread) || +		test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))  		goto slow_path;  	wakeup = llist_add(&sh->release_list, &conf->released_stripes);  	if (wakeup) @@ -350,8 +432,11 @@ slow_path:  	local_irq_save(flags);  	/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */  	if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { -		do_release_stripe(conf, sh); +		INIT_LIST_HEAD(&list); +		hash = sh->hash_lock_index; +		do_release_stripe(conf, sh, &list);  		spin_unlock(&conf->device_lock); +		release_inactive_stripe_list(conf, &list, hash);  	}  	local_irq_restore(flags);  } @@ -376,18 +461,21 @@ static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)  /* find an idle stripe, make sure it is unhashed, and return it. 
*/ -static struct stripe_head *get_free_stripe(struct r5conf *conf) +static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)  {  	struct stripe_head *sh = NULL;  	struct list_head *first; -	if (list_empty(&conf->inactive_list)) +	if (list_empty(conf->inactive_list + hash))  		goto out; -	first = conf->inactive_list.next; +	first = (conf->inactive_list + hash)->next;  	sh = list_entry(first, struct stripe_head, lru);  	list_del_init(first);  	remove_hash(sh);  	atomic_inc(&conf->active_stripes); +	BUG_ON(hash != sh->hash_lock_index); +	if (list_empty(conf->inactive_list + hash)) +		atomic_inc(&conf->empty_inactive_list_nr);  out:  	return sh;  } @@ -399,6 +487,7 @@ static void shrink_buffers(struct stripe_head *sh)  	int num = sh->raid_conf->pool_size;  	for (i = 0; i < num ; i++) { +		WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);  		p = sh->dev[i].page;  		if (!p)  			continue; @@ -419,6 +508,7 @@ static int grow_buffers(struct stripe_head *sh)  			return 1;  		}  		sh->dev[i].page = page; +		sh->dev[i].orig_page = page;  	}  	return 0;  } @@ -430,7 +520,7 @@ static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,  static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)  {  	struct r5conf *conf = sh->raid_conf; -	int i; +	int i, seq;  	BUG_ON(atomic_read(&sh->count) != 0);  	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); @@ -440,7 +530,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)  		(unsigned long long)sh->sector);  	remove_hash(sh); - +retry: +	seq = read_seqcount_begin(&conf->gen_lock);  	sh->generation = conf->generation - previous;  	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;  	sh->sector = sector; @@ -462,6 +553,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)  		dev->flags = 0;  		raid5_build_block(sh, i, previous);  	} +	if (read_seqcount_retry(&conf->gen_lock, seq)) +		goto retry;  	insert_hash(conf, sh);  	sh->cpu = smp_processor_id();  } @@ -566,57 +659,55 @@ get_active_stripe(struct r5conf *conf, sector_t sector,  		  int previous, int noblock, int noquiesce)  {  	struct stripe_head *sh; +	int hash = stripe_hash_locks_hash(sector);  	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); -	spin_lock_irq(&conf->device_lock); +	spin_lock_irq(conf->hash_locks + hash);  	do {  		wait_event_lock_irq(conf->wait_for_stripe,  				    conf->quiesce == 0 || noquiesce, -				    conf->device_lock); +				    *(conf->hash_locks + hash));  		sh = __find_stripe(conf, sector, conf->generation - previous);  		if (!sh) {  			if (!conf->inactive_blocked) -				sh = get_free_stripe(conf); +				sh = get_free_stripe(conf, hash);  			if (noblock && sh == NULL)  				break;  			if (!sh) {  				conf->inactive_blocked = 1; -				wait_event_lock_irq(conf->wait_for_stripe, -						    !list_empty(&conf->inactive_list) && -						    (atomic_read(&conf->active_stripes) -						     < (conf->max_nr_stripes *3/4) -						     || !conf->inactive_blocked), -						    conf->device_lock); +				wait_event_lock_irq( +					conf->wait_for_stripe, +					!list_empty(conf->inactive_list + hash) && +					(atomic_read(&conf->active_stripes) +					 < (conf->max_nr_stripes * 3 / 4) +					 || !conf->inactive_blocked), +					*(conf->hash_locks + hash));  				conf->inactive_blocked = 0; -			} else -				init_stripe(sh, sector, previous); -		} else { -			if (atomic_read(&sh->count)) { -				BUG_ON(!list_empty(&sh->lru) -				    && !test_bit(STRIPE_EXPANDING, 
&sh->state) -				    && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state) -				    && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));  			} else { +				init_stripe(sh, sector, previous); +				atomic_inc(&sh->count); +			} +		} else if (!atomic_inc_not_zero(&sh->count)) { +			spin_lock(&conf->device_lock); +			if (!atomic_read(&sh->count)) {  				if (!test_bit(STRIPE_HANDLE, &sh->state))  					atomic_inc(&conf->active_stripes); -				if (list_empty(&sh->lru) && -				    !test_bit(STRIPE_EXPANDING, &sh->state)) -					BUG(); +				BUG_ON(list_empty(&sh->lru) && +				       !test_bit(STRIPE_EXPANDING, &sh->state));  				list_del_init(&sh->lru);  				if (sh->group) {  					sh->group->stripes_cnt--;  					sh->group = NULL;  				}  			} +			atomic_inc(&sh->count); +			spin_unlock(&conf->device_lock);  		}  	} while (sh == NULL); -	if (sh) -		atomic_inc(&sh->count); - -	spin_unlock_irq(&conf->device_lock); +	spin_unlock_irq(conf->hash_locks + hash);  	return sh;  } @@ -766,18 +857,27 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)  				bi->bi_rw, i);  			atomic_inc(&sh->count);  			if (use_new_offset(conf, sh)) -				bi->bi_sector = (sh->sector +				bi->bi_iter.bi_sector = (sh->sector  						 + rdev->new_data_offset);  			else -				bi->bi_sector = (sh->sector +				bi->bi_iter.bi_sector = (sh->sector  						 + rdev->data_offset);  			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) -				bi->bi_rw |= REQ_FLUSH; +				bi->bi_rw |= REQ_NOMERGE; +			if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) +				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); +			sh->dev[i].vec.bv_page = sh->dev[i].page;  			bi->bi_vcnt = 1;  			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;  			bi->bi_io_vec[0].bv_offset = 0; -			bi->bi_size = STRIPE_SIZE; +			bi->bi_iter.bi_size = STRIPE_SIZE; +			/* +			 * If this is discard request, set bi_vcnt 0. We don't +			 * want to confuse SCSI because SCSI will replace payload +			 */ +			if (rw & REQ_DISCARD) +				bi->bi_vcnt = 0;  			if (rrdev)  				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); @@ -807,15 +907,24 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)  				rbi->bi_rw, i);  			atomic_inc(&sh->count);  			if (use_new_offset(conf, sh)) -				rbi->bi_sector = (sh->sector +				rbi->bi_iter.bi_sector = (sh->sector  						  + rrdev->new_data_offset);  			else -				rbi->bi_sector = (sh->sector +				rbi->bi_iter.bi_sector = (sh->sector  						  + rrdev->data_offset); +			if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) +				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); +			sh->dev[i].rvec.bv_page = sh->dev[i].page;  			rbi->bi_vcnt = 1;  			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;  			rbi->bi_io_vec[0].bv_offset = 0; -			rbi->bi_size = STRIPE_SIZE; +			rbi->bi_iter.bi_size = STRIPE_SIZE; +			/* +			 * If this is discard request, set bi_vcnt 0. 
We don't +			 * want to confuse SCSI because SCSI will replace payload +			 */ +			if (rw & REQ_DISCARD) +				rbi->bi_vcnt = 0;  			if (conf->mddev->gendisk)  				trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),  						      rbi, disk_devt(conf->mddev->gendisk), @@ -834,27 +943,28 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)  }  static struct dma_async_tx_descriptor * -async_copy_data(int frombio, struct bio *bio, struct page *page, -	sector_t sector, struct dma_async_tx_descriptor *tx) +async_copy_data(int frombio, struct bio *bio, struct page **page, +	sector_t sector, struct dma_async_tx_descriptor *tx, +	struct stripe_head *sh)  { -	struct bio_vec *bvl; +	struct bio_vec bvl; +	struct bvec_iter iter;  	struct page *bio_page; -	int i;  	int page_offset;  	struct async_submit_ctl submit;  	enum async_tx_flags flags = 0; -	if (bio->bi_sector >= sector) -		page_offset = (signed)(bio->bi_sector - sector) * 512; +	if (bio->bi_iter.bi_sector >= sector) +		page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;  	else -		page_offset = (signed)(sector - bio->bi_sector) * -512; +		page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;  	if (frombio)  		flags |= ASYNC_TX_FENCE;  	init_async_submit(&submit, flags, tx, NULL, NULL, NULL); -	bio_for_each_segment(bvl, bio, i) { -		int len = bvl->bv_len; +	bio_for_each_segment(bvl, bio, iter) { +		int len = bvl.bv_len;  		int clen;  		int b_offset = 0; @@ -870,13 +980,18 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,  			clen = len;  		if (clen > 0) { -			b_offset += bvl->bv_offset; -			bio_page = bvl->bv_page; -			if (frombio) -				tx = async_memcpy(page, bio_page, page_offset, +			b_offset += bvl.bv_offset; +			bio_page = bvl.bv_page; +			if (frombio) { +				if (sh->raid_conf->skip_copy && +				    b_offset == 0 && page_offset == 0 && +				    clen == STRIPE_SIZE) +					*page = bio_page; +				else +					tx = async_memcpy(*page, bio_page, page_offset,  						  b_offset, clen, &submit); -			else -				tx = async_memcpy(bio_page, page, b_offset, +			} else +				tx = async_memcpy(bio_page, *page, b_offset,  						  page_offset, clen, &submit);  		}  		/* chain the operations */ @@ -914,7 +1029,7 @@ static void ops_complete_biofill(void *stripe_head_ref)  			BUG_ON(!dev->read);  			rbi = dev->read;  			dev->read = NULL; -			while (rbi && rbi->bi_sector < +			while (rbi && rbi->bi_iter.bi_sector <  				dev->sector + STRIPE_SECTORS) {  				rbi2 = r5_next_bio(rbi, dev->sector);  				if (!raid5_dec_bi_active_stripes(rbi)) { @@ -950,10 +1065,10 @@ static void ops_run_biofill(struct stripe_head *sh)  			dev->read = rbi = dev->toread;  			dev->toread = NULL;  			spin_unlock_irq(&sh->stripe_lock); -			while (rbi && rbi->bi_sector < +			while (rbi && rbi->bi_iter.bi_sector <  				dev->sector + STRIPE_SECTORS) { -				tx = async_copy_data(0, rbi, dev->page, -					dev->sector, tx); +				tx = async_copy_data(0, rbi, &dev->page, +					dev->sector, tx, sh);  				rbi = r5_next_bio(rbi, dev->sector);  			}  		} @@ -1291,8 +1406,9 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)  			BUG_ON(dev->written);  			wbi = dev->written = chosen;  			spin_unlock_irq(&sh->stripe_lock); +			WARN_ON(dev->page != dev->orig_page); -			while (wbi && wbi->bi_sector < +			while (wbi && wbi->bi_iter.bi_sector <  				dev->sector + STRIPE_SECTORS) {  				if (wbi->bi_rw & REQ_FUA)  					set_bit(R5_WantFUA, &dev->flags); @@ -1300,9 +1416,15 @@ ops_run_biodrain(struct stripe_head *sh, struct 
dma_async_tx_descriptor *tx)  					set_bit(R5_SyncIO, &dev->flags);  				if (wbi->bi_rw & REQ_DISCARD)  					set_bit(R5_Discard, &dev->flags); -				else -					tx = async_copy_data(1, wbi, dev->page, -						dev->sector, tx); +				else { +					tx = async_copy_data(1, wbi, &dev->page, +						dev->sector, tx, sh); +					if (dev->page != dev->orig_page) { +						set_bit(R5_SkipCopy, &dev->flags); +						clear_bit(R5_UPTODATE, &dev->flags); +						clear_bit(R5_OVERWRITE, &dev->flags); +					} +				}  				wbi = r5_next_bio(wbi, dev->sector);  			}  		} @@ -1333,7 +1455,7 @@ static void ops_complete_reconstruct(void *stripe_head_ref)  		struct r5dev *dev = &sh->dev[i];  		if (dev->written || i == pd_idx || i == qd_idx) { -			if (!discard) +			if (!discard && !test_bit(R5_SkipCopy, &dev->flags))  				set_bit(R5_UPTODATE, &dev->flags);  			if (fua)  				set_bit(R5_WantFUA, &dev->flags); @@ -1584,7 +1706,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)  	put_cpu();  } -static int grow_one_stripe(struct r5conf *conf) +static int grow_one_stripe(struct r5conf *conf, int hash)  {  	struct stripe_head *sh;  	sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL); @@ -1600,6 +1722,7 @@ static int grow_one_stripe(struct r5conf *conf)  		kmem_cache_free(conf->slab_cache, sh);  		return 0;  	} +	sh->hash_lock_index = hash;  	/* we just created an active stripe so... */  	atomic_set(&sh->count, 1);  	atomic_inc(&conf->active_stripes); @@ -1612,6 +1735,7 @@ static int grow_stripes(struct r5conf *conf, int num)  {  	struct kmem_cache *sc;  	int devs = max(conf->raid_disks, conf->previous_raid_disks); +	int hash;  	if (conf->mddev->gendisk)  		sprintf(conf->cache_name[0], @@ -1629,9 +1753,13 @@ static int grow_stripes(struct r5conf *conf, int num)  		return 1;  	conf->slab_cache = sc;  	conf->pool_size = devs; -	while (num--) -		if (!grow_one_stripe(conf)) +	hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; +	while (num--) { +		if (!grow_one_stripe(conf, hash))  			return 1; +		conf->max_nr_stripes++; +		hash = (hash + 1) % NR_STRIPE_HASH_LOCKS; +	}  	return 0;  } @@ -1689,6 +1817,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)  	int err;  	struct kmem_cache *sc;  	int i; +	int hash, cnt;  	if (newsize <= conf->pool_size)  		return 0; /* never bother to shrink */ @@ -1728,19 +1857,31 @@ static int resize_stripes(struct r5conf *conf, int newsize)  	 * OK, we have enough stripes, start collecting inactive  	 * stripes and copying them over  	 */ +	hash = 0; +	cnt = 0;  	list_for_each_entry(nsh, &newstripes, lru) { -		spin_lock_irq(&conf->device_lock); -		wait_event_lock_irq(conf->wait_for_stripe, -				    !list_empty(&conf->inactive_list), -				    conf->device_lock); -		osh = get_free_stripe(conf); -		spin_unlock_irq(&conf->device_lock); +		lock_device_hash_lock(conf, hash); +		wait_event_cmd(conf->wait_for_stripe, +				    !list_empty(conf->inactive_list + hash), +				    unlock_device_hash_lock(conf, hash), +				    lock_device_hash_lock(conf, hash)); +		osh = get_free_stripe(conf, hash); +		unlock_device_hash_lock(conf, hash);  		atomic_set(&nsh->count, 1); -		for(i=0; i<conf->pool_size; i++) +		for(i=0; i<conf->pool_size; i++) {  			nsh->dev[i].page = osh->dev[i].page; +			nsh->dev[i].orig_page = osh->dev[i].page; +		}  		for( ; i<newsize; i++)  			nsh->dev[i].page = NULL; +		nsh->hash_lock_index = hash;  		kmem_cache_free(conf->slab_cache, osh); +		cnt++; +		if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + +		    !!((conf->max_nr_stripes % 
NR_STRIPE_HASH_LOCKS) > hash)) { +			hash++; +			cnt = 0; +		}  	}  	kmem_cache_destroy(conf->slab_cache); @@ -1786,6 +1927,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)  			if (nsh->dev[i].page == NULL) {  				struct page *p = alloc_page(GFP_NOIO);  				nsh->dev[i].page = p; +				nsh->dev[i].orig_page = p;  				if (!p)  					err = -ENOMEM;  			} @@ -1799,13 +1941,13 @@ static int resize_stripes(struct r5conf *conf, int newsize)  	return err;  } -static int drop_one_stripe(struct r5conf *conf) +static int drop_one_stripe(struct r5conf *conf, int hash)  {  	struct stripe_head *sh; -	spin_lock_irq(&conf->device_lock); -	sh = get_free_stripe(conf); -	spin_unlock_irq(&conf->device_lock); +	spin_lock_irq(conf->hash_locks + hash); +	sh = get_free_stripe(conf, hash); +	spin_unlock_irq(conf->hash_locks + hash);  	if (!sh)  		return 0;  	BUG_ON(atomic_read(&sh->count)); @@ -1817,8 +1959,10 @@ static int drop_one_stripe(struct r5conf *conf)  static void shrink_stripes(struct r5conf *conf)  { -	while (drop_one_stripe(conf)) -		; +	int hash; +	for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++) +		while (drop_one_stripe(conf, hash)) +			;  	if (conf->slab_cache)  		kmem_cache_destroy(conf->slab_cache); @@ -1923,6 +2067,9 @@ static void raid5_end_read_request(struct bio * bi, int error)  			       mdname(conf->mddev), bdn);  		else  			retry = 1; +		if (set_bad && test_bit(In_sync, &rdev->flags) +		    && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) +			retry = 1;  		if (retry)  			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {  				set_bit(R5_ReadError, &sh->dev[i].flags); @@ -1991,6 +2138,7 @@ static void raid5_end_write_request(struct bio *bi, int error)  			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);  	} else {  		if (!uptodate) { +			set_bit(STRIPE_DEGRADED, &sh->state);  			set_bit(WriteErrorSeen, &rdev->flags);  			set_bit(R5_WriteError, &sh->dev[i].flags);  			if (!test_and_set_bit(WantReplacement, &rdev->flags)) @@ -2017,24 +2165,20 @@ static void raid5_end_write_request(struct bio *bi, int error)  }  static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); -	 +  static void raid5_build_block(struct stripe_head *sh, int i, int previous)  {  	struct r5dev *dev = &sh->dev[i];  	bio_init(&dev->req);  	dev->req.bi_io_vec = &dev->vec; -	dev->req.bi_vcnt++; -	dev->req.bi_max_vecs++; +	dev->req.bi_max_vecs = 1;  	dev->req.bi_private = sh; -	dev->vec.bv_page = dev->page;  	bio_init(&dev->rreq);  	dev->rreq.bi_io_vec = &dev->rvec; -	dev->rreq.bi_vcnt++; -	dev->rreq.bi_max_vecs++; +	dev->rreq.bi_max_vecs = 1;  	dev->rreq.bi_private = sh; -	dev->rvec.bv_page = dev->page;  	dev->flags = 0;  	dev->sector = compute_blocknr(sh, i, previous); @@ -2494,7 +2638,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in  	int firstwrite=0;  	pr_debug("adding bi b#%llu to stripe s#%llu\n", -		(unsigned long long)bi->bi_sector, +		(unsigned long long)bi->bi_iter.bi_sector,  		(unsigned long long)sh->sector);  	/* @@ -2512,12 +2656,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in  			firstwrite = 1;  	} else  		bip = &sh->dev[dd_idx].toread; -	while (*bip && (*bip)->bi_sector < bi->bi_sector) { -		if (bio_end_sector(*bip) > bi->bi_sector) +	while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) { +		if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)  			goto overlap;  		bip = & (*bip)->bi_next;  	} -	if (*bip && (*bip)->bi_sector < bio_end_sector(bi)) +	if (*bip && (*bip)->bi_iter.bi_sector < 
bio_end_sector(bi))  		goto overlap;  	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); @@ -2531,7 +2675,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in  		sector_t sector = sh->dev[dd_idx].sector;  		for (bi=sh->dev[dd_idx].towrite;  		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && -			     bi && bi->bi_sector <= sector; +			     bi && bi->bi_iter.bi_sector <= sector;  		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {  			if (bio_end_sector(bi) >= sector)  				sector = bio_end_sector(bi); @@ -2541,7 +2685,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in  	}  	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", -		(unsigned long long)(*bip)->bi_sector, +		(unsigned long long)(*bip)->bi_iter.bi_sector,  		(unsigned long long)sh->sector, dd_idx);  	spin_unlock_irq(&sh->stripe_lock); @@ -2616,7 +2760,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,  		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))  			wake_up(&conf->wait_for_overlap); -		while (bi && bi->bi_sector < +		while (bi && bi->bi_iter.bi_sector <  			sh->dev[i].sector + STRIPE_SECTORS) {  			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);  			clear_bit(BIO_UPTODATE, &bi->bi_flags); @@ -2634,8 +2778,13 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,  		/* and fail all 'written' */  		bi = sh->dev[i].written;  		sh->dev[i].written = NULL; +		if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { +			WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); +			sh->dev[i].page = sh->dev[i].orig_page; +		} +  		if (bi) bitmap_end = 1; -		while (bi && bi->bi_sector < +		while (bi && bi->bi_iter.bi_sector <  		       sh->dev[i].sector + STRIPE_SECTORS) {  			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);  			clear_bit(BIO_UPTODATE, &bi->bi_flags); @@ -2659,7 +2808,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,  			spin_unlock_irq(&sh->stripe_lock);  			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))  				wake_up(&conf->wait_for_overlap); -			while (bi && bi->bi_sector < +			while (bi && bi->bi_iter.bi_sector <  			       sh->dev[i].sector + STRIPE_SECTORS) {  				struct bio *nextbi =  					r5_next_bio(bi, sh->dev[i].sector); @@ -2770,8 +2919,11 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,  	     (s->failed >= 1 && fdev[0]->toread) ||  	     (s->failed >= 2 && fdev[1]->toread) ||  	     (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite && +	      (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) &&  	      !test_bit(R5_OVERWRITE, &fdev[0]->flags)) || -	     (sh->raid_conf->level == 6 && s->failed && s->to_write))) { +	     (sh->raid_conf->level == 6 && s->failed && s->to_write && +	      s->to_write < sh->raid_conf->raid_disks - 2 && +	      (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) {  		/* we would like to get this block, possibly by computing it,  		 * otherwise read it if the backing disk is insync  		 */ @@ -2875,15 +3027,20 @@ static void handle_stripe_clean_event(struct r5conf *conf,  			dev = &sh->dev[i];  			if (!test_bit(R5_LOCKED, &dev->flags) &&  			    (test_bit(R5_UPTODATE, &dev->flags) || -			     test_bit(R5_Discard, &dev->flags))) { +			     test_bit(R5_Discard, &dev->flags) || +			     test_bit(R5_SkipCopy, &dev->flags))) {  				/* We can return any write requests */  				struct bio *wbi, *wbi2;  				
pr_debug("Return write for disc %d\n", i);  				if (test_and_clear_bit(R5_Discard, &dev->flags))  					clear_bit(R5_UPTODATE, &dev->flags); +				if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) { +					WARN_ON(test_bit(R5_UPTODATE, &dev->flags)); +					dev->page = dev->orig_page; +				}  				wbi = dev->written;  				dev->written = NULL; -				while (wbi && wbi->bi_sector < +				while (wbi && wbi->bi_iter.bi_sector <  					dev->sector + STRIPE_SECTORS) {  					wbi2 = r5_next_bio(wbi, dev->sector);  					if (!raid5_dec_bi_active_stripes(wbi)) { @@ -2899,6 +3056,8 @@ static void handle_stripe_clean_event(struct r5conf *conf,  						0);  			} else if (test_bit(R5_Discard, &dev->flags))  				discard_pending = 1; +			WARN_ON(test_bit(R5_SkipCopy, &dev->flags)); +			WARN_ON(dev->page != dev->orig_page);  		}  	if (!discard_pending &&  	    test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { @@ -2910,6 +3069,14 @@ static void handle_stripe_clean_event(struct r5conf *conf,  		}  		/* now that discard is done we can proceed with any sync */  		clear_bit(STRIPE_DISCARD, &sh->state); +		/* +		 * SCSI discard will change some bio fields and the stripe has +		 * no updated data, so remove it from hash list and the stripe +		 * will be reinitialized +		 */ +		spin_lock_irq(&conf->device_lock); +		remove_hash(sh); +		spin_unlock_irq(&conf->device_lock);  		if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))  			set_bit(STRIPE_HANDLE, &sh->state); @@ -2962,7 +3129,8 @@ static void handle_stripe_dirtying(struct r5conf *conf,  		    !test_bit(R5_LOCKED, &dev->flags) &&  		    !(test_bit(R5_UPTODATE, &dev->flags) ||  		    test_bit(R5_Wantcompute, &dev->flags))) { -			if (test_bit(R5_Insync, &dev->flags)) rcw++; +			if (test_bit(R5_Insync, &dev->flags)) +				rcw++;  			else  				rcw += 2*disks;  		} @@ -2983,10 +3151,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,  			    !(test_bit(R5_UPTODATE, &dev->flags) ||  			    test_bit(R5_Wantcompute, &dev->flags)) &&  			    test_bit(R5_Insync, &dev->flags)) { -				if ( -				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { -					pr_debug("Read_old block " -						 "%d for r-m-w\n", i); +				if (test_bit(STRIPE_PREREAD_ACTIVE, +					     &sh->state)) { +					pr_debug("Read_old block %d for r-m-w\n", +						 i);  					set_bit(R5_LOCKED, &dev->flags);  					set_bit(R5_Wantread, &dev->flags);  					s->locked++; @@ -3009,10 +3177,9 @@ static void handle_stripe_dirtying(struct r5conf *conf,  			    !(test_bit(R5_UPTODATE, &dev->flags) ||  			      test_bit(R5_Wantcompute, &dev->flags))) {  				rcw++; -				if (!test_bit(R5_Insync, &dev->flags)) -					continue; /* it's a failed drive */ -				if ( -				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { +				if (test_bit(R5_Insync, &dev->flags) && +				    test_bit(STRIPE_PREREAD_ACTIVE, +					     &sh->state)) {  					pr_debug("Read_old block "  						"%d for Reconstruct\n", i);  					set_bit(R5_LOCKED, &dev->flags); @@ -3481,7 +3648,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)  			 */  			set_bit(R5_Insync, &dev->flags); -		if (rdev && test_bit(R5_WriteError, &dev->flags)) { +		if (test_bit(R5_WriteError, &dev->flags)) {  			/* This flag does not apply to '.replacement'  			 * only to .rdev, so make sure to check that*/  			struct md_rdev *rdev2 = rcu_dereference( @@ -3494,7 +3661,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)  			} else  				clear_bit(R5_WriteError, &dev->flags);  		} -		if (rdev && test_bit(R5_MadeGood, &dev->flags)) { +	
	if (test_bit(R5_MadeGood, &dev->flags)) {  			/* This flag does not apply to '.replacement'  			 * only to .rdev, so make sure to check that*/  			struct md_rdev *rdev2 = rcu_dereference( @@ -3894,7 +4061,8 @@ static void raid5_activate_delayed(struct r5conf *conf)  	}  } -static void activate_bit_delay(struct r5conf *conf) +static void activate_bit_delay(struct r5conf *conf, +	struct list_head *temp_inactive_list)  {  	/* device_lock is held */  	struct list_head head; @@ -3902,9 +4070,11 @@ static void activate_bit_delay(struct r5conf *conf)  	list_del_init(&conf->bitmap_list);  	while (!list_empty(&head)) {  		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); +		int hash;  		list_del_init(&sh->lru);  		atomic_inc(&sh->count); -		__release_stripe(conf, sh); +		hash = sh->hash_lock_index; +		__release_stripe(conf, sh, &temp_inactive_list[hash]);  	}  } @@ -3920,7 +4090,7 @@ int md_raid5_congested(struct mddev *mddev, int bits)  		return 1;  	if (conf->quiesce)  		return 1; -	if (list_empty_careful(&conf->inactive_list)) +	if (atomic_read(&conf->empty_inactive_list_nr))  		return 1;  	return 0; @@ -3964,7 +4134,7 @@ static int raid5_mergeable_bvec(struct request_queue *q,  static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)  { -	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); +	sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);  	unsigned int chunk_sectors = mddev->chunk_sectors;  	unsigned int bio_sectors = bio_sectors(bio); @@ -4101,9 +4271,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)  	/*  	 *	compute position  	 */ -	align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector, -						    0, -						    &dd_idx, NULL); +	align_bi->bi_iter.bi_sector = +		raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, +				     0, &dd_idx, NULL);  	end_sector = bio_end_sector(align_bi);  	rcu_read_lock(); @@ -4128,7 +4298,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)  		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);  		if (!bio_fits_rdev(align_bi) || -		    is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi), +		    is_badblock(rdev, align_bi->bi_iter.bi_sector, +				bio_sectors(align_bi),  				&first_bad, &bad_sectors)) {  			/* too big in some way, or has a known bad block */  			bio_put(align_bi); @@ -4137,7 +4308,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)  		}  		/* No reshape active, so we can trust rdev->data_offset */ -		align_bi->bi_sector += rdev->data_offset; +		align_bi->bi_iter.bi_sector += rdev->data_offset;  		spin_lock_irq(&conf->device_lock);  		wait_event_lock_irq(conf->wait_for_stripe, @@ -4149,7 +4320,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)  		if (mddev->gendisk)  			trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),  					      align_bi, disk_devt(mddev->gendisk), -					      raid_bio->bi_sector); +					      raid_bio->bi_iter.bi_sector);  		generic_make_request(align_bi);  		return 1;  	} else { @@ -4242,14 +4413,14 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)  		sh->group = NULL;  	}  	list_del_init(&sh->lru); -	atomic_inc(&sh->count); -	BUG_ON(atomic_read(&sh->count) != 1); +	BUG_ON(atomic_inc_return(&sh->count) != 1);  	return sh;  }  struct raid5_plug_cb {  	struct blk_plug_cb	cb;  	struct list_head	list; +	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];  };  static void 
raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) @@ -4260,6 +4431,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)  	struct mddev *mddev = cb->cb.data;  	struct r5conf *conf = mddev->private;  	int cnt = 0; +	int hash;  	if (cb->list.next && !list_empty(&cb->list)) {  		spin_lock_irq(&conf->device_lock); @@ -4271,17 +4443,20 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)  			 * STRIPE_ON_UNPLUG_LIST clear but the stripe  			 * is still in our list  			 */ -			smp_mb__before_clear_bit(); +			smp_mb__before_atomic();  			clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);  			/*  			 * STRIPE_ON_RELEASE_LIST could be set here. In that  			 * case, the count is always > 1 here  			 */ -			__release_stripe(conf, sh); +			hash = sh->hash_lock_index; +			__release_stripe(conf, sh, &cb->temp_inactive_list[hash]);  			cnt++;  		}  		spin_unlock_irq(&conf->device_lock);  	} +	release_inactive_stripe_list(conf, cb->temp_inactive_list, +				     NR_STRIPE_HASH_LOCKS);  	if (mddev->queue)  		trace_block_unplug(mddev->queue, cnt, !from_schedule);  	kfree(cb); @@ -4302,8 +4477,12 @@ static void release_stripe_plug(struct mddev *mddev,  	cb = container_of(blk_cb, struct raid5_plug_cb, cb); -	if (cb->list.next == NULL) +	if (cb->list.next == NULL) { +		int i;  		INIT_LIST_HEAD(&cb->list); +		for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) +			INIT_LIST_HEAD(cb->temp_inactive_list + i); +	}  	if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))  		list_add_tail(&sh->lru, &cb->list); @@ -4323,8 +4502,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)  		/* Skip discard while reshape is happening */  		return; -	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); -	last_sector = bi->bi_sector + (bi->bi_size>>9); +	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); +	last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);  	bi->bi_next = NULL;  	bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ @@ -4410,6 +4589,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)  	struct stripe_head *sh;  	const int rw = bio_data_dir(bi);  	int remaining; +	DEFINE_WAIT(w); +	bool do_prepare;  	if (unlikely(bi->bi_rw & REQ_FLUSH)) {  		md_flush_request(mddev, bi); @@ -4428,20 +4609,23 @@ static void make_request(struct mddev *mddev, struct bio * bi)  		return;  	} -	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); +	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);  	last_sector = bio_end_sector(bi);  	bi->bi_next = NULL;  	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */ +	prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);  	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { -		DEFINE_WAIT(w);  		int previous;  		int seq; +		do_prepare = false;  	retry:  		seq = read_seqcount_begin(&conf->gen_lock);  		previous = 0; -		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); +		if (do_prepare) +			prepare_to_wait(&conf->wait_for_overlap, &w, +				TASK_UNINTERRUPTIBLE);  		if (unlikely(conf->reshape_progress != MaxSector)) {  			/* spinlock is needed as reshape_progress may be  			 * 64bit on a 32bit platform, and so it might be @@ -4462,6 +4646,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)  				    : logical_sector >= conf->reshape_safe) {  					spin_unlock_irq(&conf->device_lock);  					schedule(); +					do_prepare = true;  					goto retry; 
 				}  			} @@ -4498,6 +4683,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)  				if (must_retry) {  					release_stripe(sh);  					schedule(); +					do_prepare = true;  					goto retry;  				}  			} @@ -4521,8 +4707,10 @@ static void make_request(struct mddev *mddev, struct bio * bi)  				prepare_to_wait(&conf->wait_for_overlap,  						&w, TASK_INTERRUPTIBLE);  				if (logical_sector >= mddev->suspend_lo && -				    logical_sector < mddev->suspend_hi) +				    logical_sector < mddev->suspend_hi) {  					schedule(); +					do_prepare = true; +				}  				goto retry;  			} @@ -4535,9 +4723,9 @@ static void make_request(struct mddev *mddev, struct bio * bi)  				md_wakeup_thread(mddev->thread);  				release_stripe(sh);  				schedule(); +				do_prepare = true;  				goto retry;  			} -			finish_wait(&conf->wait_for_overlap, &w);  			set_bit(STRIPE_HANDLE, &sh->state);  			clear_bit(STRIPE_DELAYED, &sh->state);  			if ((bi->bi_rw & REQ_SYNC) && @@ -4547,10 +4735,10 @@ static void make_request(struct mddev *mddev, struct bio * bi)  		} else {  			/* cannot get stripe for read-ahead, just give-up */  			clear_bit(BIO_UPTODATE, &bi->bi_flags); -			finish_wait(&conf->wait_for_overlap, &w);  			break;  		}  	} +	finish_wait(&conf->wait_for_overlap, &w);  	remaining = raid5_dec_bi_active_stripes(bi);  	if (remaining == 0) { @@ -4686,14 +4874,19 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk  	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {  		/* Cannot proceed until we've updated the superblock... */  		wait_event(conf->wait_for_overlap, -			   atomic_read(&conf->reshape_stripes)==0); +			   atomic_read(&conf->reshape_stripes)==0 +			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); +		if (atomic_read(&conf->reshape_stripes) != 0) +			return 0;  		mddev->reshape_position = conf->reshape_progress;  		mddev->curr_resync_completed = sector_nr;  		conf->reshape_checkpoint = jiffies;  		set_bit(MD_CHANGE_DEVS, &mddev->flags);  		md_wakeup_thread(mddev->thread);  		wait_event(mddev->sb_wait, mddev->flags == 0 || -			   kthread_should_stop()); +			   test_bit(MD_RECOVERY_INTR, &mddev->recovery)); +		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) +			return 0;  		spin_lock_irq(&conf->device_lock);  		conf->reshape_safe = mddev->reshape_position;  		spin_unlock_irq(&conf->device_lock); @@ -4776,7 +4969,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk  	    >= mddev->resync_max - mddev->curr_resync_completed) {  		/* Cannot proceed until we've updated the superblock... 
*/  		wait_event(conf->wait_for_overlap, -			   atomic_read(&conf->reshape_stripes) == 0); +			   atomic_read(&conf->reshape_stripes) == 0 +			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); +		if (atomic_read(&conf->reshape_stripes) != 0) +			goto ret;  		mddev->reshape_position = conf->reshape_progress;  		mddev->curr_resync_completed = sector_nr;  		conf->reshape_checkpoint = jiffies; @@ -4784,13 +4980,16 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk  		md_wakeup_thread(mddev->thread);  		wait_event(mddev->sb_wait,  			   !test_bit(MD_CHANGE_DEVS, &mddev->flags) -			   || kthread_should_stop()); +			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); +		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) +			goto ret;  		spin_lock_irq(&conf->device_lock);  		conf->reshape_safe = mddev->reshape_position;  		spin_unlock_irq(&conf->device_lock);  		wake_up(&conf->wait_for_overlap);  		sysfs_notify(&mddev->kobj, NULL, "sync_completed");  	} +ret:  	return reshape_sectors;  } @@ -4875,8 +5074,8 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int  	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);  	set_bit(STRIPE_SYNC_REQUESTED, &sh->state); +	set_bit(STRIPE_HANDLE, &sh->state); -	handle_stripe(sh);  	release_stripe(sh);  	return STRIPE_SECTORS; @@ -4901,7 +5100,8 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)  	int remaining;  	int handled = 0; -	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); +	logical_sector = raid_bio->bi_iter.bi_sector & +		~((sector_t)STRIPE_SECTORS-1);  	sector = raid5_compute_sector(conf, logical_sector,  				      0, &dd_idx, NULL);  	last_sector = bio_end_sector(raid_bio); @@ -4915,7 +5115,7 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)  			/* already done this stripe */  			continue; -		sh = get_active_stripe(conf, sector, 0, 1, 0); +		sh = get_active_stripe(conf, sector, 0, 1, 1);  		if (!sh) {  			/* failed to get a stripe - must wait */ @@ -4948,27 +5148,45 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)  }  static int handle_active_stripes(struct r5conf *conf, int group, -				 struct r5worker *worker) +				 struct r5worker *worker, +				 struct list_head *temp_inactive_list)  {  	struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; -	int i, batch_size = 0; +	int i, batch_size = 0, hash; +	bool release_inactive = false;  	while (batch_size < MAX_STRIPE_BATCH &&  			(sh = __get_priority_stripe(conf, group)) != NULL)  		batch[batch_size++] = sh; -	if (batch_size == 0) -		return batch_size; +	if (batch_size == 0) { +		for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) +			if (!list_empty(temp_inactive_list + i)) +				break; +		if (i == NR_STRIPE_HASH_LOCKS) +			return batch_size; +		release_inactive = true; +	}  	spin_unlock_irq(&conf->device_lock); +	release_inactive_stripe_list(conf, temp_inactive_list, +				     NR_STRIPE_HASH_LOCKS); + +	if (release_inactive) { +		spin_lock_irq(&conf->device_lock); +		return 0; +	} +  	for (i = 0; i < batch_size; i++)  		handle_stripe(batch[i]);  	cond_resched();  	spin_lock_irq(&conf->device_lock); -	for (i = 0; i < batch_size; i++) -		__release_stripe(conf, batch[i]); +	for (i = 0; i < batch_size; i++) { +		hash = batch[i]->hash_lock_index; +		__release_stripe(conf, batch[i], &temp_inactive_list[hash]); +	}  	return batch_size;  } @@ -4989,9 +5207,10 @@ static void raid5_do_work(struct work_struct *work)  	while (1) {  		int 
batch_size, released; -		released = release_stripe_list(conf); +		released = release_stripe_list(conf, worker->temp_inactive_list); -		batch_size = handle_active_stripes(conf, group_id, worker); +		batch_size = handle_active_stripes(conf, group_id, worker, +						   worker->temp_inactive_list);  		worker->working = false;  		if (!batch_size && !released)  			break; @@ -5030,7 +5249,7 @@ static void raid5d(struct md_thread *thread)  		struct bio *bio;  		int batch_size, released; -		released = release_stripe_list(conf); +		released = release_stripe_list(conf, conf->temp_inactive_list);  		if (  		    !list_empty(&conf->bitmap_list)) { @@ -5040,7 +5259,7 @@ static void raid5d(struct md_thread *thread)  			bitmap_unplug(mddev->bitmap);  			spin_lock_irq(&conf->device_lock);  			conf->seq_write = conf->seq_flush; -			activate_bit_delay(conf); +			activate_bit_delay(conf, conf->temp_inactive_list);  		}  		raid5_activate_delayed(conf); @@ -5054,7 +5273,8 @@ static void raid5d(struct md_thread *thread)  			handled++;  		} -		batch_size = handle_active_stripes(conf, ANY_GROUP, NULL); +		batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, +						   conf->temp_inactive_list);  		if (!batch_size && !released)  			break;  		handled += batch_size; @@ -5090,22 +5310,29 @@ raid5_set_cache_size(struct mddev *mddev, int size)  {  	struct r5conf *conf = mddev->private;  	int err; +	int hash;  	if (size <= 16 || size > 32768)  		return -EINVAL; +	hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;  	while (size < conf->max_nr_stripes) { -		if (drop_one_stripe(conf)) +		if (drop_one_stripe(conf, hash))  			conf->max_nr_stripes--;  		else  			break; +		hash--; +		if (hash < 0) +			hash = NR_STRIPE_HASH_LOCKS - 1;  	}  	err = md_allow_write(mddev);  	if (err)  		return err; +	hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;  	while (size > conf->max_nr_stripes) { -		if (grow_one_stripe(conf)) +		if (grow_one_stripe(conf, hash))  			conf->max_nr_stripes++;  		else break; +		hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;  	}  	return 0;  } @@ -5171,6 +5398,50 @@ raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,  					raid5_store_preread_threshold);  static ssize_t +raid5_show_skip_copy(struct mddev *mddev, char *page) +{ +	struct r5conf *conf = mddev->private; +	if (conf) +		return sprintf(page, "%d\n", conf->skip_copy); +	else +		return 0; +} + +static ssize_t +raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) +{ +	struct r5conf *conf = mddev->private; +	unsigned long new; +	if (len >= PAGE_SIZE) +		return -EINVAL; +	if (!conf) +		return -ENODEV; + +	if (kstrtoul(page, 10, &new)) +		return -EINVAL; +	new = !!new; +	if (new == conf->skip_copy) +		return len; + +	mddev_suspend(mddev); +	conf->skip_copy = new; +	if (new) +		mddev->queue->backing_dev_info.capabilities |= +						BDI_CAP_STABLE_WRITES; +	else +		mddev->queue->backing_dev_info.capabilities &= +						~BDI_CAP_STABLE_WRITES; +	mddev_resume(mddev); +	return len; +} + +static struct md_sysfs_entry +raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR, +					raid5_show_skip_copy, +					raid5_store_skip_copy); + + +static ssize_t  stripe_cache_active_show(struct mddev *mddev, char *page)  {  	struct r5conf *conf = mddev->private; @@ -5193,15 +5464,18 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page)  		return 0;  } -static int alloc_thread_groups(struct r5conf *conf, int cnt); +static int alloc_thread_groups(struct r5conf *conf, int cnt, +			       int *group_cnt, +			       int 
*worker_cnt_per_group, +			       struct r5worker_group **worker_groups);  static ssize_t  raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)  {  	struct r5conf *conf = mddev->private;  	unsigned long new;  	int err; -	struct r5worker_group *old_groups; -	int old_group_cnt; +	struct r5worker_group *new_groups, *old_groups; +	int group_cnt, worker_cnt_per_group;  	if (len >= PAGE_SIZE)  		return -EINVAL; @@ -5217,14 +5491,19 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)  	mddev_suspend(mddev);  	old_groups = conf->worker_groups; -	old_group_cnt = conf->worker_cnt_per_group; +	if (old_groups) +		flush_workqueue(raid5_wq); + +	err = alloc_thread_groups(conf, new, +				  &group_cnt, &worker_cnt_per_group, +				  &new_groups); +	if (!err) { +		spin_lock_irq(&conf->device_lock); +		conf->group_cnt = group_cnt; +		conf->worker_cnt_per_group = worker_cnt_per_group; +		conf->worker_groups = new_groups; +		spin_unlock_irq(&conf->device_lock); -	conf->worker_groups = NULL; -	err = alloc_thread_groups(conf, new); -	if (err) { -		conf->worker_groups = old_groups; -		conf->worker_cnt_per_group = old_group_cnt; -	} else {  		if (old_groups)  			kfree(old_groups[0].workers);  		kfree(old_groups); @@ -5247,6 +5526,7 @@ static struct attribute *raid5_attrs[] =  {  	&raid5_stripecache_active.attr,  	&raid5_preread_bypass_threshold.attr,  	&raid5_group_thread_cnt.attr, +	&raid5_skip_copy.attr,  	NULL,  };  static struct attribute_group raid5_attrs_group = { @@ -5254,40 +5534,47 @@ static struct attribute_group raid5_attrs_group = {  	.attrs = raid5_attrs,  }; -static int alloc_thread_groups(struct r5conf *conf, int cnt) +static int alloc_thread_groups(struct r5conf *conf, int cnt, +			       int *group_cnt, +			       int *worker_cnt_per_group, +			       struct r5worker_group **worker_groups)  { -	int i, j; +	int i, j, k;  	ssize_t size;  	struct r5worker *workers; -	conf->worker_cnt_per_group = cnt; +	*worker_cnt_per_group = cnt;  	if (cnt == 0) { -		conf->worker_groups = NULL; +		*group_cnt = 0; +		*worker_groups = NULL;  		return 0;  	} -	conf->group_cnt = num_possible_nodes(); +	*group_cnt = num_possible_nodes();  	size = sizeof(struct r5worker) * cnt; -	workers = kzalloc(size * conf->group_cnt, GFP_NOIO); -	conf->worker_groups = kzalloc(sizeof(struct r5worker_group) * -				conf->group_cnt, GFP_NOIO); -	if (!conf->worker_groups || !workers) { +	workers = kzalloc(size * *group_cnt, GFP_NOIO); +	*worker_groups = kzalloc(sizeof(struct r5worker_group) * +				*group_cnt, GFP_NOIO); +	if (!*worker_groups || !workers) {  		kfree(workers); -		kfree(conf->worker_groups); -		conf->worker_groups = NULL; +		kfree(*worker_groups);  		return -ENOMEM;  	} -	for (i = 0; i < conf->group_cnt; i++) { +	for (i = 0; i < *group_cnt; i++) {  		struct r5worker_group *group; -		group = &conf->worker_groups[i]; +		group = &(*worker_groups)[i];  		INIT_LIST_HEAD(&group->handle_list);  		group->conf = conf;  		group->workers = workers + i * cnt;  		for (j = 0; j < cnt; j++) { -			group->workers[j].group = group; -			INIT_WORK(&group->workers[j].work, raid5_do_work); +			struct r5worker *worker = group->workers + j; +			worker->group = group; +			INIT_WORK(&worker->work, raid5_do_work); + +			for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++) +				INIT_LIST_HEAD(worker->temp_inactive_list + k);  		}  	} @@ -5318,23 +5605,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)  	return sectors * (raid_disks - conf->max_degraded);  } +static void 
free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) +{ +	safe_put_page(percpu->spare_page); +	kfree(percpu->scribble); +	percpu->spare_page = NULL; +	percpu->scribble = NULL; +} + +static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) +{ +	if (conf->level == 6 && !percpu->spare_page) +		percpu->spare_page = alloc_page(GFP_KERNEL); +	if (!percpu->scribble) +		percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); + +	if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { +		free_scratch_buffer(conf, percpu); +		return -ENOMEM; +	} + +	return 0; +} +  static void raid5_free_percpu(struct r5conf *conf)  { -	struct raid5_percpu *percpu;  	unsigned long cpu;  	if (!conf->percpu)  		return; -	get_online_cpus(); -	for_each_possible_cpu(cpu) { -		percpu = per_cpu_ptr(conf->percpu, cpu); -		safe_put_page(percpu->spare_page); -		kfree(percpu->scribble); -	}  #ifdef CONFIG_HOTPLUG_CPU  	unregister_cpu_notifier(&conf->cpu_notify);  #endif + +	get_online_cpus(); +	for_each_possible_cpu(cpu) +		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));  	put_online_cpus();  	free_percpu(conf->percpu); @@ -5361,15 +5668,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,  	switch (action) {  	case CPU_UP_PREPARE:  	case CPU_UP_PREPARE_FROZEN: -		if (conf->level == 6 && !percpu->spare_page) -			percpu->spare_page = alloc_page(GFP_KERNEL); -		if (!percpu->scribble) -			percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); - -		if (!percpu->scribble || -		    (conf->level == 6 && !percpu->spare_page)) { -			safe_put_page(percpu->spare_page); -			kfree(percpu->scribble); +		if (alloc_scratch_buffer(conf, percpu)) {  			pr_err("%s: failed memory allocation for cpu%ld\n",  			       __func__, cpu);  			return notifier_from_errno(-ENOMEM); @@ -5377,10 +5676,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,  		break;  	case CPU_DEAD:  	case CPU_DEAD_FROZEN: -		safe_put_page(percpu->spare_page); -		kfree(percpu->scribble); -		percpu->spare_page = NULL; -		percpu->scribble = NULL; +		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));  		break;  	default:  		break; @@ -5392,40 +5688,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,  static int raid5_alloc_percpu(struct r5conf *conf)  {  	unsigned long cpu; -	struct page *spare_page; -	struct raid5_percpu __percpu *allcpus; -	void *scribble; -	int err; +	int err = 0; -	allcpus = alloc_percpu(struct raid5_percpu); -	if (!allcpus) +	conf->percpu = alloc_percpu(struct raid5_percpu); +	if (!conf->percpu)  		return -ENOMEM; -	conf->percpu = allcpus; + +#ifdef CONFIG_HOTPLUG_CPU +	conf->cpu_notify.notifier_call = raid456_cpu_notify; +	conf->cpu_notify.priority = 0; +	err = register_cpu_notifier(&conf->cpu_notify); +	if (err) +		return err; +#endif  	get_online_cpus(); -	err = 0;  	for_each_present_cpu(cpu) { -		if (conf->level == 6) { -			spare_page = alloc_page(GFP_KERNEL); -			if (!spare_page) { -				err = -ENOMEM; -				break; -			} -			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; -		} -		scribble = kmalloc(conf->scribble_len, GFP_KERNEL); -		if (!scribble) { -			err = -ENOMEM; +		err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); +		if (err) { +			pr_err("%s: failed memory allocation for cpu%ld\n", +			       __func__, cpu);  			break;  		} -		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;  	} -#ifdef CONFIG_HOTPLUG_CPU -	conf->cpu_notify.notifier_call = 
raid456_cpu_notify; -	conf->cpu_notify.priority = 0; -	if (err == 0) -		err = register_cpu_notifier(&conf->cpu_notify); -#endif  	put_online_cpus();  	return err; @@ -5438,6 +5723,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)  	struct md_rdev *rdev;  	struct disk_info *disk;  	char pers_name[6]; +	int i; +	int group_cnt, worker_cnt_per_group; +	struct r5worker_group *new_group;  	if (mddev->new_level != 5  	    && mddev->new_level != 4 @@ -5472,7 +5760,12 @@ static struct r5conf *setup_conf(struct mddev *mddev)  	if (conf == NULL)  		goto abort;  	/* Don't enable multi-threading by default*/ -	if (alloc_thread_groups(conf, 0)) +	if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, +				 &new_group)) { +		conf->group_cnt = group_cnt; +		conf->worker_cnt_per_group = worker_cnt_per_group; +		conf->worker_groups = new_group; +	} else  		goto abort;  	spin_lock_init(&conf->device_lock);  	seqcount_init(&conf->gen_lock); @@ -5482,7 +5775,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)  	INIT_LIST_HEAD(&conf->hold_list);  	INIT_LIST_HEAD(&conf->delayed_list);  	INIT_LIST_HEAD(&conf->bitmap_list); -	INIT_LIST_HEAD(&conf->inactive_list);  	init_llist_head(&conf->released_stripes);  	atomic_set(&conf->active_stripes, 0);  	atomic_set(&conf->preread_active_stripes, 0); @@ -5508,6 +5800,21 @@ static struct r5conf *setup_conf(struct mddev *mddev)  	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)  		goto abort; +	/* We init hash_locks[0] separately to that it can be used +	 * as the reference lock in the spin_lock_nest_lock() call +	 * in lock_all_device_hash_locks_irq in order to convince +	 * lockdep that we know what we are doing. +	 */ +	spin_lock_init(conf->hash_locks); +	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) +		spin_lock_init(conf->hash_locks + i); + +	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) +		INIT_LIST_HEAD(conf->inactive_list + i); + +	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) +		INIT_LIST_HEAD(conf->temp_inactive_list + i); +  	conf->level = mddev->new_level;  	if (raid5_alloc_percpu(conf) != 0)  		goto abort; @@ -5548,7 +5855,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)  	else  		conf->max_degraded = 1;  	conf->algorithm = mddev->new_layout; -	conf->max_nr_stripes = NR_STRIPES;  	conf->reshape_progress = mddev->reshape_position;  	if (conf->reshape_progress != MaxSector) {  		conf->prev_chunk_sectors = mddev->chunk_sectors; @@ -5557,7 +5863,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)  	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +  		 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; -	if (grow_stripes(conf, conf->max_nr_stripes)) { +	atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); +	if (grow_stripes(conf, NR_STRIPES)) {  		printk(KERN_ERR  		       "md/raid:%s: couldn't allocate %dkB for buffers\n",  		       mdname(mddev), memory); @@ -5885,6 +6192,7 @@ static int run(struct mddev *mddev)  		blk_queue_io_min(mddev->queue, chunk_size);  		blk_queue_io_opt(mddev->queue, chunk_size *  				 (conf->raid_disks - conf->max_degraded)); +		mddev->queue->limits.raid_partial_stripes_expensive = 1;  		/*  		 * We can only discard a whole stripe. 
It doesn't make sense to  		 * discard data disk but write parity disk @@ -6363,12 +6671,18 @@ static int raid5_start_reshape(struct mddev *mddev)  	if (!mddev->sync_thread) {  		mddev->recovery = 0;  		spin_lock_irq(&conf->device_lock); +		write_seqcount_begin(&conf->gen_lock);  		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; +		mddev->new_chunk_sectors = +			conf->chunk_sectors = conf->prev_chunk_sectors; +		mddev->new_layout = conf->algorithm = conf->prev_algo;  		rdev_for_each(rdev, mddev)  			rdev->new_data_offset = rdev->data_offset;  		smp_wmb(); +		conf->generation --;  		conf->reshape_progress = MaxSector;  		mddev->reshape_position = MaxSector; +		write_seqcount_end(&conf->gen_lock);  		spin_unlock_irq(&conf->device_lock);  		return -EAGAIN;  	} @@ -6456,27 +6770,28 @@ static void raid5_quiesce(struct mddev *mddev, int state)  		break;  	case 1: /* stop all writes */ -		spin_lock_irq(&conf->device_lock); +		lock_all_device_hash_locks_irq(conf);  		/* '2' tells resync/reshape to pause so that all  		 * active stripes can drain  		 */  		conf->quiesce = 2; -		wait_event_lock_irq(conf->wait_for_stripe, +		wait_event_cmd(conf->wait_for_stripe,  				    atomic_read(&conf->active_stripes) == 0 &&  				    atomic_read(&conf->active_aligned_reads) == 0, -				    conf->device_lock); +				    unlock_all_device_hash_locks_irq(conf), +				    lock_all_device_hash_locks_irq(conf));  		conf->quiesce = 1; -		spin_unlock_irq(&conf->device_lock); +		unlock_all_device_hash_locks_irq(conf);  		/* allow reshape to continue */  		wake_up(&conf->wait_for_overlap);  		break;  	case 0: /* re-enable writes */ -		spin_lock_irq(&conf->device_lock); +		lock_all_device_hash_locks_irq(conf);  		conf->quiesce = 0;  		wake_up(&conf->wait_for_stripe);  		wake_up(&conf->wait_for_overlap); -		spin_unlock_irq(&conf->device_lock); +		unlock_all_device_hash_locks_irq(conf);  		break;  	}  } diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 2113ffa82c7..bc72cd4be5f 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -49,7 +49,7 @@   * can't distinguish between a clean block that has been generated   * from parity calculations, and a clean block that has been   * successfully written to the spare ( or to parity when resyncing). - * To distingush these states we have a stripe bit STRIPE_INSYNC that + * To distinguish these states we have a stripe bit STRIPE_INSYNC that   * is set whenever a write is scheduled to the spare, or to the parity   * disc if there is no spare.  A sync request clears this bit, and   * when we find it set with no buffers locked, we know the sync is @@ -205,6 +205,7 @@ struct stripe_head {  	short			pd_idx;		/* parity disk index */  	short			qd_idx;		/* 'Q' disk index for raid6 */  	short			ddf_layout;/* use DDF ordering to calculate Q */ +	short			hash_lock_index;  	unsigned long		state;		/* state flags */  	atomic_t		count;	      /* nr of active thread/requests */  	int			bm_seq;	/* sequence number for bitmap flushes */ @@ -231,7 +232,7 @@ struct stripe_head {  		 */  		struct bio	req, rreq;  		struct bio_vec	vec, rvec; -		struct page	*page; +		struct page	*page, *orig_page;  		struct bio	*toread, *read, *towrite, *written;  		sector_t	sector;			/* sector of this page */  		unsigned long	flags; @@ -298,6 +299,7 @@ enum r5dev_flags {  			 * data in, and now is a good time to write it out.  			 
*/  	R5_Discard,	/* Discard the stripe */ +	R5_SkipCopy,	/* Don't copy data from bio to stripe cache */  };  /* @@ -367,9 +369,18 @@ struct disk_info {  	struct md_rdev	*rdev, *replacement;  }; +/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64. + * This is because we sometimes take all the spinlocks + * and creating that much locking depth can cause + * problems. + */ +#define NR_STRIPE_HASH_LOCKS 8 +#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1) +  struct r5worker {  	struct work_struct work;  	struct r5worker_group *group; +	struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];  	bool working;  }; @@ -382,6 +393,8 @@ struct r5worker_group {  struct r5conf {  	struct hlist_head	*stripe_hashtbl; +	/* only protect corresponding hash list and inactive_list */ +	spinlock_t		hash_locks[NR_STRIPE_HASH_LOCKS];  	struct mddev		*mddev;  	int			chunk_sectors;  	int			level, algorithm; @@ -424,6 +437,7 @@ struct r5conf {  	atomic_t		pending_full_writes; /* full write backlog */  	int			bypass_count; /* bypassed prereads */  	int			bypass_threshold; /* preread nice */ +	int			skip_copy; /* Don't copy data from bio to stripe cache */  	struct list_head	*last_hold; /* detect hold_list promotions */  	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */ @@ -462,7 +476,8 @@ struct r5conf {  	 * Free stripes pool  	 */  	atomic_t		active_stripes; -	struct list_head	inactive_list; +	struct list_head	inactive_list[NR_STRIPE_HASH_LOCKS]; +	atomic_t		empty_inactive_list_nr;  	struct llist_head	released_stripes;  	wait_queue_head_t	wait_for_stripe;  	wait_queue_head_t	wait_for_overlap; @@ -477,6 +492,7 @@ struct r5conf {  	 * the new thread here until we fully activate the array.  	 */  	struct md_thread	*thread; +	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];  	struct r5worker_group	*worker_groups;  	int			group_cnt;  	int			worker_cnt_per_group;  | 
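
Note (illustration, not part of the patch above): the raid5 half of this series replaces the single device_lock-protected inactive list with NR_STRIPE_HASH_LOCKS (8) hash-bucketed locks and per-bucket inactive lists, as introduced by stripe_hash_locks_hash(), get_free_stripe(conf, hash) and the new conf->hash_locks[] / inactive_list[] arrays. The small userspace sketch below only illustrates the bucketing arithmetic; STRIPE_SHIFT is assumed here to be 3 (a 4 KiB stripe unit of 512-byte sectors) and sector_t is modelled as unsigned long long, both assumptions for the sketch rather than values taken from this diff.

/*
 * Illustrative sketch only -- not part of the patch.  It mimics the
 * stripe_hash_locks_hash() bucketing added to raid5.c, using
 * NR_STRIPE_HASH_LOCKS = 8 as defined in raid5.h above.
 */
#include <stdio.h>

#define NR_STRIPE_HASH_LOCKS	8
#define STRIPE_HASH_LOCKS_MASK	(NR_STRIPE_HASH_LOCKS - 1)
#define STRIPE_SHIFT		3	/* assumption for this sketch */

typedef unsigned long long sector_t;

/* Same arithmetic as the new stripe_hash_locks_hash() helper in raid5.c */
static int stripe_hash_locks_hash(sector_t sect)
{
	return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
}

int main(void)
{
	sector_t sectors[] = { 0, 8, 16, 24, 64, 72, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(sectors) / sizeof(sectors[0]); i++)
		printf("sector %llu -> hash lock %d\n",
		       sectors[i], stripe_hash_locks_hash(sectors[i]));
	return 0;
}

Keeping the bucket count a power of two below 64 (the limit noted in the raid5.h comment) keeps the mask cheap and bounds the nesting depth when lock_all_device_hash_locks_irq() has to take every bucket lock at once.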

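Note (illustration, not part of the patch above): the skip_copy changes (the orig_page field, the R5_SkipCopy flag, the skip_copy sysfs attribute and the BDI_CAP_STABLE_WRITES toggle) let a full, aligned STRIPE_SIZE write be drained into the stripe cache by borrowing the bio's page instead of memcpy()ing it, as done in async_copy_data() above. The toy program below is a hedged sketch of that borrow-or-copy decision only; struct toy_dev, drain_write() and write_done() are invented names for the example and do not exist in the kernel.

/*
 * Illustrative sketch only -- not part of the patch.  A full, aligned
 * STRIPE_SIZE write is borrowed in place (no memcpy); anything smaller
 * is copied into the cache's own page, as before.
 */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define STRIPE_SIZE 4096	/* stand-in for the PAGE_SIZE stripe unit */

/* Toy stand-in for struct r5dev: just the fields the idea needs. */
struct toy_dev {
	char *page;		/* page currently presented to the parity math */
	char *orig_page;	/* the cache's own backing page */
	bool skip_copy;		/* analogue of the new R5_SkipCopy flag */
};

/* Drain one incoming write into the "stripe cache". */
static void drain_write(struct toy_dev *dev, char *buf,
			size_t offset, size_t len, bool skip_copy_enabled)
{
	if (skip_copy_enabled && offset == 0 && len == STRIPE_SIZE) {
		dev->page = buf;	/* writer's page must stay stable */
		dev->skip_copy = true;
	} else {
		memcpy(dev->orig_page + offset, buf, len);
		dev->page = dev->orig_page;
		dev->skip_copy = false;
	}
}

/* Once the write has hit the disks, the borrowed page is handed back. */
static void write_done(struct toy_dev *dev)
{
	if (dev->skip_copy) {
		dev->page = dev->orig_page;
		dev->skip_copy = false;
	}
}

int main(void)
{
	char cache_page[STRIPE_SIZE];
	char full_write[STRIPE_SIZE] = "whole-stripe write";
	char small_write[] = "partial write";
	struct toy_dev dev = { cache_page, cache_page, false };

	drain_write(&dev, full_write, 0, STRIPE_SIZE, true);
	printf("full write:  skip_copy=%d (page borrowed: %d)\n",
	       dev.skip_copy, dev.page == full_write);
	write_done(&dev);

	drain_write(&dev, small_write, 0, sizeof(small_write), true);
	printf("small write: skip_copy=%d (copied into cache page)\n",
	       dev.skip_copy);
	return 0;
}

Because the borrowed page is written to disk directly, the real code only enables this when the queue advertises stable pages, which is why the raid5_store_skip_copy() handler above sets or clears BDI_CAP_STABLE_WRITES while the array is suspended.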