Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--	drivers/md/bcache/request.c | 1370
1 file changed, 577 insertions(+), 793 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 71eb233b9ac..15fff4f68a7 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -12,11 +12,9 @@
 #include "request.h"
 #include "writeback.h"
 
-#include <linux/cgroup.h>
 #include <linux/module.h>
 #include <linux/hash.h>
 #include <linux/random.h>
-#include "blk-cgroup.h"
 
 #include <trace/events/bcache.h>
 
@@ -25,187 +23,28 @@
 struct kmem_cache *bch_search_cache;
 
-static void check_should_skip(struct cached_dev *, struct search *);
-
-/* Cgroup interface */
-
-#ifdef CONFIG_CGROUP_BCACHE
-static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };
-
-static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
-{
-	struct cgroup_subsys_state *css;
-	return cgroup &&
-		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
-		? container_of(css, struct bch_cgroup, css)
-		: &bcache_default_cgroup;
-}
-
-struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
-{
-	struct cgroup_subsys_state *css = bio->bi_css
-		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
-		: task_subsys_state(current, bcache_subsys_id);
-
-	return css
-		? container_of(css, struct bch_cgroup, css)
-		: &bcache_default_cgroup;
-}
-
-static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
-			struct file *file,
-			char __user *buf, size_t nbytes, loff_t *ppos)
-{
-	char tmp[1024];
-	int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes,
-					  cgroup_to_bcache(cgrp)->cache_mode + 1);
-
-	if (len < 0)
-		return len;
-
-	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
-}
-
-static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
-			    const char *buf)
-{
-	int v = bch_read_string_list(buf, bch_cache_modes);
-	if (v < 0)
-		return v;
-
-	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
-	return 0;
-}
-
-static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
-{
-	return cgroup_to_bcache(cgrp)->verify;
-}
-
-static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
-{
-	cgroup_to_bcache(cgrp)->verify = val;
-	return 0;
-}
-
-static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
-{
-	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
-	return atomic_read(&bcachecg->stats.cache_hits);
-}
-
-static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
-{
-	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
-	return atomic_read(&bcachecg->stats.cache_misses);
-}
-
-static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
-					 struct cftype *cft)
-{
-	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
-	return atomic_read(&bcachecg->stats.cache_bypass_hits);
-}
-
-static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
-					   struct cftype *cft)
-{
-	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
-	return atomic_read(&bcachecg->stats.cache_bypass_misses);
-}
-
-static struct cftype bch_files[] = {
-	{
-		.name		= "cache_mode",
-		.read		= cache_mode_read,
-		.write_string	= cache_mode_write,
-	},
-	{
-		.name		= "verify",
-		.read_u64	= bch_verify_read,
-		.write_u64	= bch_verify_write,
-	},
-	{
-		.name		= "cache_hits",
-		.read_u64	= bch_cache_hits_read,
-	},
-	{
-		.name		= "cache_misses",
-		.read_u64	= bch_cache_misses_read,
-	},
-	{
-		.name		= "cache_bypass_hits",
-		.read_u64	= bch_cache_bypass_hits_read,
-	},
-	{
-		.name		= "cache_bypass_misses",
-		.read_u64	= bch_cache_bypass_misses_read,
-	},
-	{ }	/* terminate */
-};
-
-static void init_bch_cgroup(struct bch_cgroup *cg)
-{
-	cg->cache_mode = -1;
-}
-
-static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
-{
-	struct bch_cgroup *cg;
-
-	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
-	if (!cg)
-		return ERR_PTR(-ENOMEM);
-	init_bch_cgroup(cg);
-	return &cg->css;
-}
-
-static void bcachecg_destroy(struct cgroup *cgroup)
-{
-	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
-	free_css_id(&bcache_subsys, &cg->css);
-	kfree(cg);
-}
-
-struct cgroup_subsys bcache_subsys = {
-	.create		= bcachecg_create,
-	.destroy	= bcachecg_destroy,
-	.subsys_id	= bcache_subsys_id,
-	.name		= "bcache",
-	.module		= THIS_MODULE,
-};
-EXPORT_SYMBOL_GPL(bcache_subsys);
-#endif
+static void bch_data_insert_start(struct closure *);
 
 static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
 {
-#ifdef CONFIG_CGROUP_BCACHE
-	int r = bch_bio_to_cgroup(bio)->cache_mode;
-	if (r >= 0)
-		return r;
-#endif
 	return BDEV_CACHE_MODE(&dc->sb);
 }
 
 static bool verify(struct cached_dev *dc, struct bio *bio)
 {
-#ifdef CONFIG_CGROUP_BCACHE
-	if (bch_bio_to_cgroup(bio)->verify)
-		return true;
-#endif
 	return dc->verify;
 }
 
 static void bio_csum(struct bio *bio, struct bkey *k)
 {
-	struct bio_vec *bv;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 	uint64_t csum = 0;
-	int i;
 
-	bio_for_each_segment(bv, bio, i) {
-		void *d = kmap(bv->bv_page) + bv->bv_offset;
-		csum = bch_crc64_update(csum, d, bv->bv_len);
-		kunmap(bv->bv_page);
+	bio_for_each_segment(bv, bio, iter) {
+		void *d = kmap(bv.bv_page) + bv.bv_offset;
+		csum = bch_crc64_update(csum, d, bv.bv_len);
+		kunmap(bv.bv_page);
 	}
 
 	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
@@ -213,221 +52,97 @@ static void bio_csum(struct bio *bio, struct bkey *k)
 }
 
 /* Insert data into cache */
 
-static void bio_invalidate(struct closure *cl)
+static void bch_data_insert_keys(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct bio *bio = op->cache_bio;
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+	atomic_t *journal_ref = NULL;
+	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
+	int ret;
 
-	pr_debug("invalidating %i sectors from %llu",
-		 bio_sectors(bio), (uint64_t) bio->bi_sector);
-
-	while (bio_sectors(bio)) {
-		unsigned len = min(bio_sectors(bio), 1U << 14);
-
-		if (bch_keylist_realloc(&op->keys, 0, op->c))
-			goto out;
-
-		bio->bi_sector	+= len;
-		bio->bi_size	-= len << 9;
-
-		bch_keylist_add(&op->keys,
-				&KEY(op->inode, bio->bi_sector, len));
-	}
-
-	op->insert_data_done = true;
-	bio_put(bio);
-out:
-	continue_at(cl, bch_journal, bcache_wq);
-}
-
-struct open_bucket {
-	struct list_head	list;
-	struct task_struct	*last;
-	unsigned		sectors_free;
-	BKEY_PADDED(key);
-};
-
-void bch_open_buckets_free(struct cache_set *c)
-{
-	struct open_bucket *b;
-
-	while (!list_empty(&c->data_buckets)) {
-		b = list_first_entry(&c->data_buckets,
-				     struct open_bucket, list);
-		list_del(&b->list);
-		kfree(b);
-	}
-}
-
-int bch_open_buckets_alloc(struct cache_set *c)
-{
-	int i;
-
-	spin_lock_init(&c->data_bucket_lock);
+	/*
+	 * If we're looping, might already be waiting on
+	 * another journal write - can't wait on more than one journal write at
+	 * a time
+	 *
+	 * XXX: this looks wrong
+	 */
+#if 0
+	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
+		closure_sync(&s->cl);
+#endif
 
-	for (i = 0; i < 6; i++) {
-		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
-		if (!b)
-			return -ENOMEM;
+	if (!op->replace)
+		journal_ref = bch_journal(op->c, &op->insert_keys,
+					  op->flush_journal ? cl : NULL);
 
-		list_add(&b->list, &c->data_buckets);
+	ret = bch_btree_insert(op->c, &op->insert_keys,
+			       journal_ref, replace_key);
+	if (ret == -ESRCH) {
+		op->replace_collision = true;
+	} else if (ret) {
+		op->error		= -ENOMEM;
+		op->insert_data_done	= true;
 	}
 
-	return 0;
-}
-
-/*
- * We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
- *
- * The ideas is if you've got multiple tasks pulling data into the cache at the
- * same time, you'll get better cache utilization if you try to segregate their
- * data and preserve locality.
- *
- * For example, say you've starting Firefox at the same time you're copying a
- * bunch of files. Firefox will likely end up being fairly hot and stay in the
- * cache awhile, but the data you copied might not be; if you wrote all that
- * data to the same buckets it'd get invalidated at the same time.
- *
- * Both of those tasks will be doing fairly random IO so we can't rely on
- * detecting sequential IO to segregate their data, but going off of the task
- * should be a sane heuristic.
- */
-static struct open_bucket *pick_data_bucket(struct cache_set *c,
-					    const struct bkey *search,
-					    struct task_struct *task,
-					    struct bkey *alloc)
-{
-	struct open_bucket *ret, *ret_task = NULL;
+	if (journal_ref)
+		atomic_dec_bug(journal_ref);
 
-	list_for_each_entry_reverse(ret, &c->data_buckets, list)
-		if (!bkey_cmp(&ret->key, search))
-			goto found;
-		else if (ret->last == task)
-			ret_task = ret;
+	if (!op->insert_data_done)
+		continue_at(cl, bch_data_insert_start, op->wq);
 
-	ret = ret_task ?: list_first_entry(&c->data_buckets,
-					   struct open_bucket, list);
-found:
-	if (!ret->sectors_free && KEY_PTRS(alloc)) {
-		ret->sectors_free = c->sb.bucket_size;
-		bkey_copy(&ret->key, alloc);
-		bkey_init(alloc);
-	}
-
-	if (!ret->sectors_free)
-		ret = NULL;
-
-	return ret;
+	bch_keylist_free(&op->insert_keys);
+	closure_return(cl);
 }
 
-/*
- * Allocates some space in the cache to write to, and k to point to the newly
- * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
- * end of the newly allocated space).
- *
- * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
- * sectors were actually allocated.
- *
- * If s->writeback is true, will not fail.
- */
-static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
-			      struct search *s)
+static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
+			       struct cache_set *c)
 {
-	struct cache_set *c = s->op.c;
-	struct open_bucket *b;
-	BKEY_PADDED(key) alloc;
-	struct closure cl, *w = NULL;
-	unsigned i;
-
-	if (s->writeback) {
-		closure_init_stack(&cl);
-		w = &cl;
-	}
-
-	/*
-	 * We might have to allocate a new bucket, which we can't do with a
-	 * spinlock held. So if we have to allocate, we drop the lock, allocate
-	 * and then retry. KEY_PTRS() indicates whether alloc points to
-	 * allocated bucket(s).
-	 */
-
-	bkey_init(&alloc.key);
-	spin_lock(&c->data_bucket_lock);
-
-	while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
-		unsigned watermark = s->op.write_prio
-			? WATERMARK_MOVINGGC
-			: WATERMARK_NONE;
-
-		spin_unlock(&c->data_bucket_lock);
-
-		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
-			return false;
-
-		spin_lock(&c->data_bucket_lock);
-	}
+	size_t oldsize = bch_keylist_nkeys(l);
+	size_t newsize = oldsize + u64s;
 
 	/*
-	 * If we had to allocate, we might race and not need to allocate the
-	 * second time we call find_data_bucket(). If we allocated a bucket but
-	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
+	 * The journalling code doesn't handle the case where the keys to insert
+	 * is bigger than an empty write: If we just return -ENOMEM here,
+	 * bio_insert() and bio_invalidate() will insert the keys created so far
+	 * and finish the rest when the keylist is empty.
 	 */
-	if (KEY_PTRS(&alloc.key))
-		__bkey_put(c, &alloc.key);
-
-	for (i = 0; i < KEY_PTRS(&b->key); i++)
-		EBUG_ON(ptr_stale(c, &b->key, i));
-
-	/* Set up the pointer to the space we're allocating: */
+	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
+		return -ENOMEM;
 
-	for (i = 0; i < KEY_PTRS(&b->key); i++)
-		k->ptr[i] = b->key.ptr[i];
+	return __bch_keylist_realloc(l, u64s);
+}
 
-	sectors = min(sectors, b->sectors_free);
+static void bch_data_invalidate(struct closure *cl)
+{
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+	struct bio *bio = op->bio;
 
-	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
-	SET_KEY_SIZE(k, sectors);
-	SET_KEY_PTRS(k, KEY_PTRS(&b->key));
+	pr_debug("invalidating %i sectors from %llu",
+		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 
-	/*
-	 * Move b to the end of the lru, and keep track of what this bucket was
-	 * last used for:
-	 */
-	list_move_tail(&b->list, &c->data_buckets);
-	bkey_copy_key(&b->key, k);
-	b->last = s->task;
+	while (bio_sectors(bio)) {
+		unsigned sectors = min(bio_sectors(bio),
+				       1U << (KEY_SIZE_BITS - 1));
 
-	b->sectors_free	-= sectors;
+		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
+			goto out;
 
-	for (i = 0; i < KEY_PTRS(&b->key); i++) {
-		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
+		bio->bi_iter.bi_sector	+= sectors;
+		bio->bi_iter.bi_size	-= sectors << 9;
 
-		atomic_long_add(sectors,
-				&PTR_CACHE(c, &b->key, i)->sectors_written);
+		bch_keylist_add(&op->insert_keys,
+				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
 	}
 
-	if (b->sectors_free < c->sb.block_size)
-		b->sectors_free = 0;
-
-	/*
-	 * k takes refcounts on the buckets it points to until it's inserted
-	 * into the btree, but if we're done with this bucket we just transfer
-	 * get_data_bucket()'s refcount.
-	 */
-	if (b->sectors_free)
-		for (i = 0; i < KEY_PTRS(&b->key); i++)
-			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
-
-	spin_unlock(&c->data_bucket_lock);
-	return true;
+	op->insert_data_done = true;
+	bio_put(bio);
+out:
+	continue_at(cl, bch_data_insert_keys, op->wq);
 }
 
-static void bch_insert_data_error(struct closure *cl)
+static void bch_data_insert_error(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
 	/*
 	 * Our data write just errored, which means we've got a bunch of keys to
@@ -438,35 +153,34 @@ static void bch_insert_data_error(struct closure *cl)
 	 * from the keys we'll accomplish just that.
 	 */
 
-	struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;
+	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
 
-	while (src != op->keys.top) {
+	while (src != op->insert_keys.top) {
 		struct bkey *n = bkey_next(src);
 
 		SET_KEY_PTRS(src, 0);
-		bkey_copy(dst, src);
+		memmove(dst, src, bkey_bytes(src));
 
 		dst = bkey_next(dst);
 		src = n;
 	}
 
-	op->keys.top = dst;
+	op->insert_keys.top = dst;
 
-	bch_journal(cl);
+	bch_data_insert_keys(cl);
 }
 
-static void bch_insert_data_endio(struct bio *bio, int error)
+static void bch_data_insert_endio(struct bio *bio, int error)
 {
 	struct closure *cl = bio->bi_private;
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
 	if (error) {
 		/* TODO: We could try to recover from this. */
-		if (s->writeback)
-			s->error = error;
-		else if (s->write)
-			set_closure_fn(cl, bch_insert_data_error, bcache_wq);
+		if (op->writeback)
+			op->error = error;
+		else if (!op->replace)
+			set_closure_fn(cl, bch_data_insert_error, op->wq);
 		else
 			set_closure_fn(cl, NULL, NULL);
 	}
@@ -474,20 +188,19 @@ static void bch_insert_data_endio(struct bio *bio, int error)
 	bch_bbio_endio(op->c, bio, error, "writing data to cache");
 }
 
-static void bch_insert_data_loop(struct closure *cl)
+static void bch_data_insert_start(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
-	struct bio *bio = op->cache_bio, *n;
-
-	if (op->skip)
-		return bio_invalidate(cl);
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+	struct bio *bio = op->bio, *n;
 
 	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
 		set_gc_sectors(op->c);
-		bch_queue_gc(op->c);
+		wake_up_gc(op->c);
 	}
 
+	if (op->bypass)
+		return bch_data_invalidate(cl);
+
 	/*
 	 * Journal writes are marked REQ_FLUSH; if the original write was a
 	 * flush, it'll wait on the journal write.
@@ -497,29 +210,30 @@ static void bch_insert_data_loop(struct closure *cl)
 	do {
 		unsigned i;
 		struct bkey *k;
-		struct bio_set *split = s->d
-			? s->d->bio_split : op->c->bio_split;
+		struct bio_set *split = op->c->bio_split;
 
 		/* 1 for the device pointer and 1 for the chksum */
-		if (bch_keylist_realloc(&op->keys,
-					1 + (op->csum ? 1 : 0),
+		if (bch_keylist_realloc(&op->insert_keys,
+					3 + (op->csum ? 1 : 0),
 					op->c))
-			continue_at(cl, bch_journal, bcache_wq);
+			continue_at(cl, bch_data_insert_keys, op->wq);
 
-		k = op->keys.top;
+		k = op->insert_keys.top;
 		bkey_init(k);
 		SET_KEY_INODE(k, op->inode);
-		SET_KEY_OFFSET(k, bio->bi_sector);
+		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
 
-		if (!bch_alloc_sectors(k, bio_sectors(bio), s))
+		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
+				       op->write_point, op->write_prio,
+				       op->writeback))
			goto err;
 
-		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
+		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 
-		n->bi_end_io	= bch_insert_data_endio;
+		n->bi_end_io	= bch_data_insert_endio;
 		n->bi_private	= cl;
 
-		if (s->writeback) {
+		if (op->writeback) {
 			SET_KEY_DIRTY(k, true);
 
 			for (i = 0; i < KEY_PTRS(k); i++)
@@ -532,17 +246,17 @@ static void bch_insert_data_loop(struct closure *cl)
 			bio_csum(n, k);
 
 		trace_bcache_cache_insert(k);
-		bch_keylist_push(&op->keys);
+		bch_keylist_push(&op->insert_keys);
 
 		n->bi_rw |= REQ_WRITE;
 		bch_submit_bbio(n, op->c, k, 0);
 	} while (n != bio);
 
 	op->insert_data_done = true;
-	continue_at(cl, bch_journal, bcache_wq);
+	continue_at(cl, bch_data_insert_keys, op->wq);
 err:
 	/* bch_alloc_sectors() blocks if s->writeback = true */
-	BUG_ON(s->writeback);
+	BUG_ON(op->writeback);
 
 	/*
 	 * But if it's not a writeback write we'd rather just bail out if
@@ -550,15 +264,15 @@ err:
 	 * we might be starving btree writes for gc or something.
 	 */
 
-	if (s->write) {
+	if (!op->replace) {
 		/*
 		 * Writethrough write: We can't complete the write until we've
 		 * updated the index. But we don't want to delay the write while
 		 * we wait for buckets to be freed up, so just invalidate the
 		 * rest of the write.
 		 */
-		op->skip = true;
-		return bio_invalidate(cl);
+		op->bypass = true;
+		return bch_data_invalidate(cl);
 	} else {
 		/*
 		 * From a cache miss, we can just insert the keys for the data
@@ -567,15 +281,15 @@ err:
 		op->insert_data_done = true;
 		bio_put(bio);
 
-		if (!bch_keylist_empty(&op->keys))
-			continue_at(cl, bch_journal, bcache_wq);
+		if (!bch_keylist_empty(&op->insert_keys))
+			continue_at(cl, bch_data_insert_keys, op->wq);
 		else
 			closure_return(cl);
 	}
 }
 
 /**
- * bch_insert_data - stick some data in the cache
+ * bch_data_insert - stick some data in the cache
  *
  * This is the starting point for any data to end up in a cache device; it could
  * be from a normal write, or a writeback write, or a write to a flash only
@@ -587,56 +301,176 @@ err:
  * data is written it calls bch_journal, and after the keys have been added to
  * the next journal write they're inserted into the btree.
  *
- * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
+ * It inserts the data in s->cache_bio; bi_sector is used for the key offset,
  * and op->inode is used for the key inode.
  *
- * If op->skip is true, instead of inserting the data it invalidates the region
- * of the cache represented by op->cache_bio and op->inode.
+ * If s->bypass is true, instead of inserting the data it invalidates the
+ * region of the cache represented by s->cache_bio and op->inode.
  */
-void bch_insert_data(struct closure *cl)
+void bch_data_insert(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
+	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+
+	trace_bcache_write(op->bio, op->writeback, op->bypass);
 
-	bch_keylist_init(&op->keys);
-	bio_get(op->cache_bio);
-	bch_insert_data_loop(cl);
+	bch_keylist_init(&op->insert_keys);
+	bio_get(op->bio);
+	bch_data_insert_start(cl);
 }
 
-void bch_btree_insert_async(struct closure *cl)
+/* Congested? */
+
+unsigned bch_get_congested(struct cache_set *c)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	int i;
+	long rand;
 
-	if (bch_btree_insert(op, op->c)) {
-		s->error		= -ENOMEM;
-		op->insert_data_done	= true;
-	}
+	if (!c->congested_read_threshold_us &&
+	    !c->congested_write_threshold_us)
+		return 0;
+
+	i = (local_clock_us() - c->congested_last_us) / 1024;
+	if (i < 0)
+		return 0;
 
-	if (op->insert_data_done) {
-		bch_keylist_free(&op->keys);
-		closure_return(cl);
-	} else
-		continue_at(cl, bch_insert_data_loop, bcache_wq);
+	i += atomic_read(&c->congested);
+	if (i >= 0)
+		return 0;
+
+	i += CONGESTED_MAX;
+
+	if (i > 0)
+		i = fract_exp_two(i, 6);
+
+	rand = get_random_int();
+	i -= bitmap_weight(&rand, BITS_PER_LONG);
+
+	return i > 0 ? i : 1;
 }
 
-/* Common code for the make_request functions */
+static void add_sequential(struct task_struct *t)
+{
+	ewma_add(t->sequential_io_avg,
+		 t->sequential_io, 8, 0);
 
-static void request_endio(struct bio *bio, int error)
+	t->sequential_io = 0;
+}
+
+static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
 {
-	struct closure *cl = bio->bi_private;
+	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
+}
 
-	if (error) {
-		struct search *s = container_of(cl, struct search, cl);
-		s->error = error;
-		/* Only cache read errors are recoverable */
-		s->recoverable = false;
+static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
+{
+	struct cache_set *c = dc->disk.c;
+	unsigned mode = cache_mode(dc, bio);
+	unsigned sectors, congested = bch_get_congested(c);
+	struct task_struct *task = current;
+	struct io *i;
+
+	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
+	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
+	    (bio->bi_rw & REQ_DISCARD))
+		goto skip;
+
+	if (mode == CACHE_MODE_NONE ||
+	    (mode == CACHE_MODE_WRITEAROUND &&
+	     (bio->bi_rw & REQ_WRITE)))
+		goto skip;
+
+	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
+	    bio_sectors(bio) & (c->sb.block_size - 1)) {
+		pr_debug("skipping unaligned io");
+		goto skip;
 	}
 
-	bio_put(bio);
-	closure_put(cl);
+	if (bypass_torture_test(dc)) {
+		if ((get_random_int() & 3) == 3)
+			goto skip;
+		else
+			goto rescale;
+	}
+
+	if (!congested && !dc->sequential_cutoff)
+		goto rescale;
+
+	if (!congested &&
+	    mode == CACHE_MODE_WRITEBACK &&
+	    (bio->bi_rw & REQ_WRITE) &&
+	    (bio->bi_rw & REQ_SYNC))
+		goto rescale;
+
+	spin_lock(&dc->io_lock);
+
+	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+		if (i->last == bio->bi_iter.bi_sector &&
+		    time_before(jiffies, i->jiffies))
+			goto found;
+
+	i = list_first_entry(&dc->io_lru, struct io, lru);
+
+	add_sequential(task);
+	i->sequential = 0;
+found:
+	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
+		i->sequential	+= bio->bi_iter.bi_size;
+
+	i->last			 = bio_end_sector(bio);
+	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
+	task->sequential_io	 = i->sequential;
+
+	hlist_del(&i->hash);
+	hlist_add_head(&i->hash, iohash(dc, i->last));
+	list_move_tail(&i->lru, &dc->io_lru);
+
+	spin_unlock(&dc->io_lock);
+
+	sectors = max(task->sequential_io,
+		      task->sequential_io_avg) >> 9;
+
+	if (dc->sequential_cutoff &&
+	    sectors >= dc->sequential_cutoff >> 9) {
+		trace_bcache_bypass_sequential(bio);
+		goto skip;
+	}
+
+	if (congested && sectors >= congested) {
+		trace_bcache_bypass_congested(bio);
+		goto skip;
+	}
+
+rescale:
+	bch_rescale_priorities(c, bio_sectors(bio));
+	return false;
+skip:
+	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
+	return true;
 }
 
-void bch_cache_read_endio(struct bio *bio, int error)
+/* Cache lookup */
+
+struct search {
+	/* Stack frame for bio_complete */
+	struct closure		cl;
+
+	struct bbio		bio;
+	struct bio		*orig_bio;
+	struct bio		*cache_miss;
+	struct bcache_device	*d;
+
+	unsigned		insert_bio_sectors;
+	unsigned		recoverable:1;
+	unsigned		write:1;
+	unsigned		read_dirty_data:1;
+
+	unsigned long		start_time;
+
+	struct btree_op		op;
+	struct data_insert_op	iop;
+};
+
+static void bch_cache_read_endio(struct bio *bio, int error)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct closure *cl = bio->bi_private;
@@ -650,13 +484,117 @@ void bch_cache_read_endio(struct bio *bio, int error)
 	 */
 
 	if (error)
-		s->error = error;
-	else if (ptr_stale(s->op.c, &b->key, 0)) {
-		atomic_long_inc(&s->op.c->cache_read_races);
-		s->error = -EINTR;
+		s->iop.error = error;
+	else if (!KEY_DIRTY(&b->key) &&
+		 ptr_stale(s->iop.c, &b->key, 0)) {
+		atomic_long_inc(&s->iop.c->cache_read_races);
+		s->iop.error = -EINTR;
+	}
+
+	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
+}
+
+/*
+ * Read from a single key, handling the initial cache miss if the key starts in
+ * the middle of the bio
+ */
+static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
+{
+	struct search *s = container_of(op, struct search, op);
+	struct bio *n, *bio = &s->bio.bio;
+	struct bkey *bio_key;
+	unsigned ptr;
+
+	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
+		return MAP_CONTINUE;
+
+	if (KEY_INODE(k) != s->iop.inode ||
+	    KEY_START(k) > bio->bi_iter.bi_sector) {
+		unsigned bio_sectors = bio_sectors(bio);
+		unsigned sectors = KEY_INODE(k) == s->iop.inode
+			? min_t(uint64_t, INT_MAX,
+				KEY_START(k) - bio->bi_iter.bi_sector)
+			: INT_MAX;
+
+		int ret = s->d->cache_miss(b, s, bio, sectors);
+		if (ret != MAP_CONTINUE)
+			return ret;
+
+		/* if this was a complete miss we shouldn't get here */
+		BUG_ON(bio_sectors <= sectors);
 	}
 
-	bch_bbio_endio(s->op.c, bio, error, "reading from cache");
+	if (!KEY_SIZE(k))
+		return MAP_CONTINUE;
+
+	/* XXX: figure out best pointer - for multiple cache devices */
+	ptr = 0;
+
+	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
+
+	if (KEY_DIRTY(k))
+		s->read_dirty_data = true;
+
+	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
+				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
+			   GFP_NOIO, s->d->bio_split);
+
+	bio_key = &container_of(n, struct bbio, bio)->key;
+	bch_bkey_copy_single_ptr(bio_key, k, ptr);
+
+	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
+	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
+
+	n->bi_end_io	= bch_cache_read_endio;
+	n->bi_private	= &s->cl;
+
+	/*
+	 * The bucket we're reading from might be reused while our bio
+	 * is in flight, and we could then end up reading the wrong
+	 * data.
+	 *
+	 * We guard against this by checking (in cache_read_endio()) if
+	 * the pointer is stale again; if so, we treat it as an error
+	 * and reread from the backing device (but we don't pass that
+	 * error up anywhere).
+	 */
+
+	__bch_submit_bbio(n, b->c);
+	return n == bio ? MAP_DONE : MAP_CONTINUE;
+}
+
+static void cache_lookup(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, iop.cl);
+	struct bio *bio = &s->bio.bio;
+	int ret;
+
+	bch_btree_op_init(&s->op, -1);
+
+	ret = bch_btree_map_keys(&s->op, s->iop.c,
+				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
+				 cache_lookup_fn, MAP_END_KEY);
+	if (ret == -EAGAIN)
+		continue_at(cl, cache_lookup, bcache_wq);
+
+	closure_return(cl);
+}
+
+/* Common code for the make_request functions */
+
+static void request_endio(struct bio *bio, int error)
+{
+	struct closure *cl = bio->bi_private;
+
+	if (error) {
+		struct search *s = container_of(cl, struct search, cl);
+		s->iop.error = error;
+		/* Only cache read errors are recoverable */
+		s->recoverable = false;
+	}
+
+	bio_put(bio);
+	closure_put(cl);
 }
 
 static void bio_complete(struct search *s)
@@ -670,19 +608,21 @@ static void bio_complete(struct search *s)
 		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
 		part_stat_unlock();
 
-		trace_bcache_request_end(s, s->orig_bio);
-		bio_endio(s->orig_bio, s->error);
+		trace_bcache_request_end(s->d, s->orig_bio);
+		bio_endio(s->orig_bio, s->iop.error);
 		s->orig_bio = NULL;
 	}
 }
 
-static void do_bio_hook(struct search *s)
+static void do_bio_hook(struct search *s, struct bio *orig_bio)
 {
 	struct bio *bio = &s->bio.bio;
-	memcpy(bio, s->orig_bio, sizeof(struct bio));
 
+	bio_init(bio);
+	__bio_clone_fast(bio, orig_bio);
 	bio->bi_end_io		= request_endio;
 	bio->bi_private		= &s->cl;
+
 	atomic_set(&bio->bi_cnt, 3);
 }
 
@@ -691,61 +631,44 @@ static void search_free(struct closure *cl)
 	struct search *s = container_of(cl, struct search, cl);
 	bio_complete(s);
 
-	if (s->op.cache_bio)
-		bio_put(s->op.cache_bio);
-
-	if (s->unaligned_bvec)
-		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
+	if (s->iop.bio)
+		bio_put(s->iop.bio);
 
 	closure_debug_destroy(cl);
 	mempool_free(s, s->d->c->search);
 }
 
-static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
+static inline struct search *search_alloc(struct bio *bio,
+					  struct bcache_device *d)
 {
-	struct bio_vec *bv;
-	struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
-	memset(s, 0, offsetof(struct search, op.keys));
+	struct search *s;
 
-	__closure_init(&s->cl, NULL);
+	s = mempool_alloc(d->c->search, GFP_NOIO);
+
+	closure_init(&s->cl, NULL);
+	do_bio_hook(s, bio);
 
-	s->op.inode		= d->id;
-	s->op.c			= d->c;
-	s->d			= d;
-	s->op.lock		= -1;
-	s->task			= current;
 	s->orig_bio		= bio;
-	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
-	s->op.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
-	s->op.skip		= (bio->bi_rw & REQ_DISCARD) != 0;
+	s->cache_miss		= NULL;
+	s->d			= d;
 	s->recoverable		= 1;
+	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
+	s->read_dirty_data	= 0;
 	s->start_time		= jiffies;
-	do_bio_hook(s);
 
-	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
-		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
-		memcpy(bv, bio_iovec(bio),
-		       sizeof(struct bio_vec) * bio_segments(bio));
-
-		s->bio.bio.bi_io_vec	= bv;
-		s->unaligned_bvec	= 1;
-	}
+	s->iop.c		= d->c;
+	s->iop.bio		= NULL;
+	s->iop.inode		= d->id;
+	s->iop.write_point	= hash_long((unsigned long) current, 16);
+	s->iop.write_prio	= 0;
+	s->iop.error		= 0;
+	s->iop.flags		= 0;
+	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+	s->iop.wq		= bcache_wq;
 
 	return s;
 }
 
-static void btree_read_async(struct closure *cl)
-{
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-
-	int ret = btree_root(search_recurse, op->c, op);
-
-	if (ret == -EAGAIN)
-		continue_at(cl, btree_read_async, bcache_wq);
-
-	closure_return(cl);
-}
-
 /* Cached devices */
 
 static void cached_dev_bio_complete(struct closure *cl)
@@ -759,190 +682,179 @@ static void cached_dev_bio_complete(struct closure *cl)
 
 /* Process reads */
 
-static void cached_dev_read_complete(struct closure *cl)
+static void cached_dev_cache_miss_done(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 
-	if (s->op.insert_collision)
-		bch_mark_cache_miss_collision(s);
+	if (s->iop.replace_collision)
+		bch_mark_cache_miss_collision(s->iop.c, s->d);
 
-	if (s->op.cache_bio) {
+	if (s->iop.bio) {
 		int i;
 		struct bio_vec *bv;
 
-		__bio_for_each_segment(bv, s->op.cache_bio, i, 0)
+		bio_for_each_segment_all(bv, s->iop.bio, i)
 			__free_page(bv->bv_page);
 	}
 
 	cached_dev_bio_complete(cl);
 }
 
-static void request_read_error(struct closure *cl)
+static void cached_dev_read_error(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
-	struct bio_vec *bv;
-	int i;
+	struct bio *bio = &s->bio.bio;
 
 	if (s->recoverable) {
 		/* Retry from the backing device: */
 		trace_bcache_read_retry(s->orig_bio);
 
-		s->error = 0;
-		bv = s->bio.bio.bi_io_vec;
-		do_bio_hook(s);
-		s->bio.bio.bi_io_vec = bv;
-
-		if (!s->unaligned_bvec)
-			bio_for_each_segment(bv, s->orig_bio, i)
-				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
-		else
-			memcpy(s->bio.bio.bi_io_vec,
-			       bio_iovec(s->orig_bio),
-			       sizeof(struct bio_vec) *
-			       bio_segments(s->orig_bio));
+		s->iop.error = 0;
+		do_bio_hook(s, s->orig_bio);
 
 		/* XXX: invalidate cache */
 
-		closure_bio_submit(&s->bio.bio, &s->cl, s->d);
+		closure_bio_submit(bio, cl, s->d);
 	}
 
-	continue_at(cl, cached_dev_read_complete, NULL);
+	continue_at(cl, cached_dev_cache_miss_done, NULL);
 }
 
-static void request_read_done(struct closure *cl)
+static void cached_dev_read_done(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
 	/*
-	 * s->cache_bio != NULL implies that we had a cache miss; cache_bio now
-	 * contains data ready to be inserted into the cache.
+	 * We had a cache miss; cache_bio now contains data ready to be inserted
+	 * into the cache.
 	 *
 	 * First, we copy the data we just read from cache_bio's bounce buffers
 	 * to the buffers the original bio pointed to:
 	 */
 
-	if (s->op.cache_bio) {
-		bio_reset(s->op.cache_bio);
-		s->op.cache_bio->bi_sector	= s->cache_miss->bi_sector;
-		s->op.cache_bio->bi_bdev	= s->cache_miss->bi_bdev;
-		s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
-		bch_bio_map(s->op.cache_bio, NULL);
+	if (s->iop.bio) {
+		bio_reset(s->iop.bio);
+		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
+		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
+		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
+		bch_bio_map(s->iop.bio, NULL);
 
-		bio_copy_data(s->cache_miss, s->op.cache_bio);
+		bio_copy_data(s->cache_miss, s->iop.bio);
 
 		bio_put(s->cache_miss);
 		s->cache_miss = NULL;
 	}
 
-	if (verify(dc, &s->bio.bio) && s->recoverable)
-		bch_data_verify(s);
+	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
+		bch_data_verify(dc, s->orig_bio);
 
 	bio_complete(s);
 
-	if (s->op.cache_bio &&
-	    !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
-		s->op.type = BTREE_REPLACE;
-		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+	if (s->iop.bio &&
+	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
+		BUG_ON(!s->iop.replace);
+		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
 	}
 
-	continue_at(cl, cached_dev_read_complete, NULL);
+	continue_at(cl, cached_dev_cache_miss_done, NULL);
 }
 
-static void request_read_done_bh(struct closure *cl)
+static void cached_dev_read_done_bh(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
-	bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
-	trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip);
+	bch_mark_cache_accounting(s->iop.c, s->d,
+				  !s->cache_miss, s->iop.bypass);
+	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
 
-	if (s->error)
-		continue_at_nobarrier(cl, request_read_error, bcache_wq);
-	else if (s->op.cache_bio || verify(dc, &s->bio.bio))
-		continue_at_nobarrier(cl, request_read_done, bcache_wq);
+	if (s->iop.error)
+		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
+	else if (s->iop.bio || verify(dc, &s->bio.bio))
+		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
 	else
-		continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
+		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
 }
 
 static int cached_dev_cache_miss(struct btree *b, struct search *s,
 				 struct bio *bio, unsigned sectors)
 {
-	int ret = 0;
-	unsigned reada;
+	int ret = MAP_CONTINUE;
+	unsigned reada = 0;
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
-	struct bio *miss;
-
-	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-	if (miss == bio)
-		s->op.lookup_done = true;
+	struct bio *miss, *cache_bio;
 
-	miss->bi_end_io		= request_endio;
-	miss->bi_private	= &s->cl;
-
-	if (s->cache_miss || s->op.skip)
+	if (s->cache_miss || s->iop.bypass) {
+		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
 		goto out_submit;
-
-	if (miss != bio ||
-	    (bio->bi_rw & REQ_RAHEAD) ||
-	    (bio->bi_rw & REQ_META) ||
-	    s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
-		reada = 0;
-	else {
-		reada = min(dc->readahead >> 9,
-			    sectors - bio_sectors(miss));
-
-		if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev))
-			reada = bdev_sectors(miss->bi_bdev) -
-				bio_end_sector(miss);
 	}
 
-	s->cache_bio_sectors = bio_sectors(miss) + reada;
-	s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
-			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
-			dc->disk.bio_split);
+	if (!(bio->bi_rw & REQ_RAHEAD) &&
+	    !(bio->bi_rw & REQ_META) &&
+	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
+		reada = min_t(sector_t, dc->readahead >> 9,
+			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
 
-	if (!s->op.cache_bio)
-		goto out_submit;
+	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
+
+	s->iop.replace_key = KEY(s->iop.inode,
+				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
+				 s->insert_bio_sectors);
+
+	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
+	if (ret)
+		return ret;
 
-	s->op.cache_bio->bi_sector	= miss->bi_sector;
-	s->op.cache_bio->bi_bdev	= miss->bi_bdev;
-	s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
+	s->iop.replace = true;
 
-	s->op.cache_bio->bi_end_io	= request_endio;
-	s->op.cache_bio->bi_private	= &s->cl;
+	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 
 	/* btree_search_recurse()'s btree iterator is no good anymore */
-	ret = -EINTR;
-	if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
-		goto out_put;
+	ret = miss == bio ? MAP_DONE : -EINTR;
+
+	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
+			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
+			dc->disk.bio_split);
+	if (!cache_bio)
+		goto out_submit;
 
-	bch_bio_map(s->op.cache_bio, NULL);
-	if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
+	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
+	cache_bio->bi_bdev		= miss->bi_bdev;
+	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;
+
+	cache_bio->bi_end_io	= request_endio;
+	cache_bio->bi_private	= &s->cl;
+
+	bch_bio_map(cache_bio, NULL);
+	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
 		goto out_put;
 
-	s->cache_miss = miss;
-	bio_get(s->op.cache_bio);
+	if (reada)
+		bch_mark_cache_readahead(s->iop.c, s->d);
 
-	closure_bio_submit(s->op.cache_bio, &s->cl, s->d);
+	s->cache_miss	= miss;
+	s->iop.bio	= cache_bio;
+	bio_get(cache_bio);
+	closure_bio_submit(cache_bio, &s->cl, s->d);
 
 	return ret;
 out_put:
-	bio_put(s->op.cache_bio);
-	s->op.cache_bio = NULL;
+	bio_put(cache_bio);
 out_submit:
+	miss->bi_end_io		= request_endio;
+	miss->bi_private	= &s->cl;
 	closure_bio_submit(miss, &s->cl, s->d);
 	return ret;
 }
 
-static void request_read(struct cached_dev *dc, struct search *s)
+static void cached_dev_read(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 
-	check_should_skip(dc, s);
-	closure_call(&s->op.cl, btree_read_async, NULL, cl);
-
-	continue_at(cl, request_read_done_bh, NULL);
+	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
+	continue_at(cl, cached_dev_read_done_bh, NULL);
 }
 
 /* Process writes */
@@ -956,50 +868,56 @@ static void cached_dev_write_complete(struct closure *cl)
 	cached_dev_bio_complete(cl);
 }
 
-static void request_write(struct cached_dev *dc, struct search *s)
+static void cached_dev_write(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 	struct bio *bio = &s->bio.bio;
-	struct bkey start, end;
-	start = KEY(dc->disk.id, bio->bi_sector, 0);
-	end = KEY(dc->disk.id, bio_end_sector(bio), 0);
+	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
+	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 
-	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
+	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
 
-	check_should_skip(dc, s);
 	down_read_non_owner(&dc->writeback_lock);
-
 	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
-		s->op.skip	= false;
-		s->writeback	= true;
+		/*
+		 * We overlap with some dirty data undergoing background
+		 * writeback, force this write to writeback
		 */
+		s->iop.bypass = false;
+		s->iop.writeback = true;
 	}
 
+	/*
+	 * Discards aren't _required_ to do anything, so skipping if
+	 * check_overlapping returned true is ok
+	 *
+	 * But check_overlapping drops dirty keys for which io hasn't started,
+	 * so we still want to call it.
+	 */
 	if (bio->bi_rw & REQ_DISCARD)
-		goto skip;
+		s->iop.bypass = true;
 
 	if (should_writeback(dc, s->orig_bio,
 			     cache_mode(dc, bio),
-			     s->op.skip)) {
-		s->op.skip = false;
-		s->writeback = true;
+			     s->iop.bypass)) {
+		s->iop.bypass = false;
+		s->iop.writeback = true;
 	}
 
-	if (s->op.skip)
-		goto skip;
-
-	trace_bcache_write(s->orig_bio, s->writeback, s->op.skip);
-
-	if (!s->writeback) {
-		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
-						   dc->disk.bio_split);
+	if (s->iop.bypass) {
+		s->iop.bio = s->orig_bio;
+		bio_get(s->iop.bio);
 
-		closure_bio_submit(bio, cl, s->d);
-	} else {
+		if (!(bio->bi_rw & REQ_DISCARD) ||
+		    blk_queue_discard(bdev_get_queue(dc->bdev)))
+			closure_bio_submit(bio, cl, s->d);
+	} else if (s->iop.writeback) {
 		bch_writeback_add(dc);
+		s->iop.bio = bio;
 
 		if (bio->bi_rw & REQ_FLUSH) {
 			/* Also need to send a flush to the backing device */
-			struct bio *flush = bio_alloc_bioset(0, GFP_NOIO,
+			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
 							     dc->disk.bio_split);
 
 			flush->bi_rw	= WRITE_FLUSH;
@@ -1008,39 +926,26 @@ static void request_write(struct cached_dev *dc, struct search *s)
 			flush->bi_private = cl;
 
 			closure_bio_submit(flush, cl, s->d);
-		} else {
-			s->op.cache_bio = bio;
 		}
-	}
-out:
-	closure_call(&s->op.cl, bch_insert_data, NULL, cl);
-	continue_at(cl, cached_dev_write_complete, NULL);
-skip:
-	s->op.skip = true;
-	s->op.cache_bio = s->orig_bio;
-	bio_get(s->op.cache_bio);
+	} else {
+		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
 
-	if ((bio->bi_rw & REQ_DISCARD) &&
-	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
-		goto out;
+		closure_bio_submit(bio, cl, s->d);
+	}
 
-	closure_bio_submit(bio, cl, s->d);
-	goto out;
+	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
+	continue_at(cl, cached_dev_write_complete, NULL);
 }
 
-static void request_nodata(struct cached_dev *dc, struct search *s)
+static void cached_dev_nodata(struct closure *cl)
 {
-	struct closure *cl = &s->cl;
+	struct search *s = container_of(cl, struct search, cl);
 	struct bio *bio = &s->bio.bio;
 
-	if (bio->bi_rw & REQ_DISCARD) {
-		request_write(dc, s);
-		return;
-	}
-
-	if (s->op.flush_journal)
-		bch_journal_meta(s->op.c, cl);
+	if (s->iop.flush_journal)
+		bch_journal_meta(s->iop.c, cl);
 
+	/* If it's a flush, we send the flush to the backing device too */
 	closure_bio_submit(bio, cl, s->d);
 
 	continue_at(cl, cached_dev_bio_complete, NULL);
@@ -1048,134 +953,6 @@ static void request_nodata(struct cached_dev *dc, struct search *s)
 
 /* Cached devices - read & write stuff */
 
-unsigned bch_get_congested(struct cache_set *c)
-{
-	int i;
-	long rand;
-
-	if (!c->congested_read_threshold_us &&
-	    !c->congested_write_threshold_us)
-		return 0;
-
-	i = (local_clock_us() - c->congested_last_us) / 1024;
-	if (i < 0)
-		return 0;
-
-	i += atomic_read(&c->congested);
-	if (i >= 0)
-		return 0;
-
-	i += CONGESTED_MAX;
-
-	if (i > 0)
-		i = fract_exp_two(i, 6);
-
-	rand = get_random_int();
-	i -= bitmap_weight(&rand, BITS_PER_LONG);
-
-	return i > 0 ? i : 1;
-}
-
-static void add_sequential(struct task_struct *t)
-{
-	ewma_add(t->sequential_io_avg,
-		 t->sequential_io, 8, 0);
-
-	t->sequential_io = 0;
-}
-
-static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
-{
-	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
-}
-
-static void check_should_skip(struct cached_dev *dc, struct search *s)
-{
-	struct cache_set *c = s->op.c;
-	struct bio *bio = &s->bio.bio;
-	unsigned mode = cache_mode(dc, bio);
-	unsigned sectors, congested = bch_get_congested(c);
-
-	if (atomic_read(&dc->disk.detaching) ||
-	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
-	    (bio->bi_rw & REQ_DISCARD))
-		goto skip;
-
-	if (mode == CACHE_MODE_NONE ||
-	    (mode == CACHE_MODE_WRITEAROUND &&
-	     (bio->bi_rw & REQ_WRITE)))
-		goto skip;
-
-	if (bio->bi_sector   & (c->sb.block_size - 1) ||
-	    bio_sectors(bio) & (c->sb.block_size - 1)) {
-		pr_debug("skipping unaligned io");
-		goto skip;
-	}
-
-	if (!congested && !dc->sequential_cutoff)
-		goto rescale;
-
-	if (!congested &&
-	    mode == CACHE_MODE_WRITEBACK &&
-	    (bio->bi_rw & REQ_WRITE) &&
-	    (bio->bi_rw & REQ_SYNC))
-		goto rescale;
-
-	if (dc->sequential_merge) {
-		struct io *i;
-
-		spin_lock(&dc->io_lock);
-
-		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
-			if (i->last == bio->bi_sector &&
-			    time_before(jiffies, i->jiffies))
-				goto found;
-
-		i = list_first_entry(&dc->io_lru, struct io, lru);
-
-		add_sequential(s->task);
-		i->sequential = 0;
-found:
-		if (i->sequential + bio->bi_size > i->sequential)
-			i->sequential	+= bio->bi_size;
-
-		i->last			 = bio_end_sector(bio);
-		i->jiffies		 = jiffies + msecs_to_jiffies(5000);
-		s->task->sequential_io	 = i->sequential;
-
-		hlist_del(&i->hash);
-		hlist_add_head(&i->hash, iohash(dc, i->last));
-		list_move_tail(&i->lru, &dc->io_lru);
-
-		spin_unlock(&dc->io_lock);
-	} else {
-		s->task->sequential_io = bio->bi_size;
-
-		add_sequential(s->task);
-	}
-
-	sectors = max(s->task->sequential_io,
-		      s->task->sequential_io_avg) >> 9;
-
-	if (dc->sequential_cutoff &&
-	    sectors >= dc->sequential_cutoff >> 9) {
-		trace_bcache_bypass_sequential(s->orig_bio);
-		goto skip;
-	}
-
-	if (congested && sectors >= congested) {
-		trace_bcache_bypass_congested(s->orig_bio);
-		goto skip;
-	}
-
-rescale:
-	bch_rescale_priorities(c, bio_sectors(bio));
-	return;
-skip:
-	bch_mark_sectors_bypassed(s, bio_sectors(bio));
-	s->op.skip = true;
-}
-
 static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct search *s;
@@ -1189,18 +966,28 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 	part_stat_unlock();
 
 	bio->bi_bdev = dc->bdev;
-	bio->bi_sector += dc->sb.data_offset;
+	bio->bi_iter.bi_sector += dc->sb.data_offset;
 
 	if (cached_dev_get(dc)) {
 		s = search_alloc(bio, d);
-		trace_bcache_request_start(s, bio);
+		trace_bcache_request_start(s->d, bio);
+
+		if (!bio->bi_iter.bi_size) {
+			/*
+			 * can't call bch_journal_meta from under
+			 * generic_make_request
+			 */
+			continue_at_nobarrier(&s->cl,
+					      cached_dev_nodata,
+					      bcache_wq);
+		} else {
+			s->iop.bypass = check_should_bypass(dc, bio);
 
-		if (!bio_has_data(bio))
-			request_nodata(dc, s);
-		else if (rw)
-			request_write(dc, s);
-		else
-			request_read(dc, s);
+			if (rw)
+				cached_dev_write(dc, s);
+			else
+				cached_dev_read(dc, s);
		}
 	} else {
 		if ((bio->bi_rw & REQ_DISCARD) &&
 		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
@@ -1257,27 +1044,28 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 static int flash_dev_cache_miss(struct btree *b, struct search *s,
 				struct bio *bio, unsigned sectors)
 {
-	struct bio_vec *bv;
-	int i;
+	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;
 
-	/* Zero fill bio */
+	swap(bio->bi_iter.bi_size, bytes);
+	zero_fill_bio(bio);
+	swap(bio->bi_iter.bi_size, bytes);
 
-	bio_for_each_segment(bv, bio, i) {
-		unsigned j = min(bv->bv_len >> 9, sectors);
+	bio_advance(bio, bytes);
 
-		void *p = kmap(bv->bv_page);
-		memset(p + bv->bv_offset, 0, j << 9);
-		kunmap(bv->bv_page);
+	if (!bio->bi_iter.bi_size)
+		return MAP_DONE;
 
-		sectors	-= j;
-	}
+	return MAP_CONTINUE;
+}
 
-	bio_advance(bio, min(sectors << 9, bio->bi_size));
+static void flash_dev_nodata(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
 
-	if (!bio->bi_size)
-		s->op.lookup_done = true;
+	if (s->iop.flush_journal)
+		bch_journal_meta(s->iop.c, cl);
 
-	return 0;
+	continue_at(cl, search_free, NULL);
 }
 
 static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
@@ -1296,23 +1084,28 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 	cl = &s->cl;
 	bio = &s->bio.bio;
 
-	trace_bcache_request_start(s, bio);
+	trace_bcache_request_start(s->d, bio);
 
-	if (bio_has_data(bio) && !rw) {
-		closure_call(&s->op.cl, btree_read_async, NULL, cl);
-	} else if (bio_has_data(bio) || s->op.skip) {
-		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
-					&KEY(d->id, bio->bi_sector, 0),
+	if (!bio->bi_iter.bi_size) {
+		/*
+		 * can't call bch_journal_meta from under
+		 * generic_make_request
+		 */
+		continue_at_nobarrier(&s->cl,
+				      flash_dev_nodata,
+				      bcache_wq);
+	} else if (rw) {
+		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
 					&KEY(d->id, bio_end_sector(bio), 0));
 
-		s->writeback	= true;
-		s->op.cache_bio	= bio;
+		s->iop.bypass		= (bio->bi_rw & REQ_DISCARD) != 0;
+		s->iop.writeback	= true;
+		s->iop.bio		= bio;
 
-		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
 	} else {
-		/* No data - probably a cache flush */
-		if (s->op.flush_journal)
-			bch_journal_meta(s->op.c, cl);
+		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
 	}
 
 	continue_at(cl, search_free, NULL);
@@ -1352,9 +1145,6 @@ void bch_flash_dev_request_init(struct bcache_device *d)
 
 void bch_request_exit(void)
 {
-#ifdef CONFIG_CGROUP_BCACHE
-	cgroup_unload_subsys(&bcache_subsys);
-#endif
 	if (bch_search_cache)
 		kmem_cache_destroy(bch_search_cache);
 }
@@ -1365,11 +1155,5 @@ int __init bch_request_init(void)
 	if (!bch_search_cache)
 		return -ENOMEM;
 
-#ifdef CONFIG_CGROUP_BCACHE
-	cgroup_load_subsys(&bcache_subsys);
-	init_bch_cgroup(&bcache_default_cgroup);
-
-	cgroup_add_cftypes(&bcache_subsys, bch_files);
-#endif
 	return 0;
 }
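Two of the heuristics this patch consolidates into check_should_bypass() reward a closer look. The first is bch_get_congested(), which converts how recently the cache exceeded its configured read/write latency thresholds into a sector threshold: the more congested the cache, the smaller the threshold and the more IO gets sent straight to the backing device. The user-space sketch below is illustrative only; fract_exp_two() follows the kernel helper's documented behaviour (roughly 2^(x / 2^fract_bits), with linear interpolation between powers of two), and CONGESTED_MAX, the congestion counter, and rand() are simplified stand-ins for the kernel's state and get_random_int(), not the real APIs.

	/*
	 * Illustrative user-space model of bch_get_congested(); not kernel
	 * code. The inputs are stand-ins: "congested" models the counter
	 * that latency overruns drive negative, and us_since_congested the
	 * local_clock_us() delta that lets it decay back toward zero.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#define CONGESTED_MAX	1024

	/* ~2^(x / 2^fract_bits): shift by the integer part, then linearly
	 * interpolate the fractional part between powers of two. */
	static unsigned fract_exp_two(unsigned x, unsigned fract_bits)
	{
		unsigned fract = x & ~(~0U << fract_bits);

		x >>= fract_bits;
		x = 1U << x;
		x += (x * fract) >> fract_bits;

		return x;
	}

	/* Returns 0 (no throttling) or a sector threshold: the smaller the
	 * result, the more IO check_should_bypass() routes around the cache. */
	static unsigned get_congested(int congested, int us_since_congested)
	{
		int i = us_since_congested / 1024;	/* decay with time */

		if (i < 0)
			return 0;

		i += congested;
		if (i >= 0)
			return 0;			/* not congested */

		i += CONGESTED_MAX;			/* now 0 < i <= MAX */

		if (i > 0)
			i = fract_exp_two(i, 6);	/* exponential ramp */

		/* random jitter so callers don't all flip at one threshold */
		i -= __builtin_popcountl((unsigned long)rand());

		return i > 0 ? i : 1;
	}

	int main(void)
	{
		for (int c = -CONGESTED_MAX; c < 0; c += 128)
			printf("congested=%5d -> bypass threshold %u sectors\n",
			       c, get_congested(c, 0));
		return 0;
	}

Running it shows the intended shape: heavy congestion yields a threshold of a few sectors (almost everything bypasses), while mild congestion yields thresholds in the thousands of sectors, so only large streams are diverted.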

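The second heuristic is the sequential cutoff itself. bcache tracks runs of contiguous IO per task in a small hash/LRU of struct io entries (the io_hash/io_lru manipulation visible above), and when a run ends it is folded into task->sequential_io_avg. The sketch below reduces that to the core decision; ewma_add() is assumed to mirror the shift-based weight/factor macro from bcache's util.h of this era, and the hash/LRU bookkeeping is deliberately elided.

	/*
	 * Illustrative model of the sequential cutoff in check_should_bypass();
	 * not kernel code. ewma_add() is an assumed plain-C reading of the
	 * bcache macro: weight is a power of two, factor gives fixed-point
	 * headroom.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t ewma_add(uint64_t *ewma, uint64_t val,
				 unsigned weight, unsigned factor)
	{
		*ewma *= weight - 1;
		*ewma += val << factor;
		*ewma /= weight;
		return *ewma >> factor;
	}

	struct task {
		uint64_t sequential_io;		/* bytes in the current run */
		uint64_t sequential_io_avg;	/* EWMA of completed runs */
	};

	/* A run ended (no recent IO continued it): fold it into the average. */
	static void add_sequential(struct task *t)
	{
		ewma_add(&t->sequential_io_avg, t->sequential_io, 8, 0);
		t->sequential_io = 0;
	}

	static bool should_bypass(const struct task *t,
				  uint64_t sequential_cutoff, unsigned congested)
	{
		/* larger of the live run and the average, in 512-byte sectors */
		uint64_t sectors = (t->sequential_io > t->sequential_io_avg
				    ? t->sequential_io
				    : t->sequential_io_avg) >> 9;

		if (sequential_cutoff && sectors >= sequential_cutoff >> 9)
			return true;	/* big stream: backing device handles it */

		if (congested && sectors >= congested)
			return true;	/* congested: cache only small/random IO */

		return false;
	}

	int main(void)
	{
		struct task t = { .sequential_io = 8 << 20 };	/* 8 MiB run */

		printf("mid-run, 4 MiB cutoff: bypass=%d\n",
		       should_bypass(&t, 4 << 20, 0));
		add_sequential(&t);	/* avg becomes 1 MiB, run resets */
		printf("after run, 4 MiB cutoff: bypass=%d\n",
		       should_bypass(&t, 4 << 20, 0));
		return 0;
	}

Taking max(sequential_io, sequential_io_avg) is the interesting design point: a task mid-way through a large sequential stream bypasses immediately, while a task with a history of large runs keeps bypassing for a while even between runs, which keeps backup-style workloads from flushing the cache.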