/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
	return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;
		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}
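
/*
 * The checksum lives in the u64 slot just past the key's last pointer
 * (k->ptr[KEY_PTRS(k)]), masked to the low 63 bits of the crc64. A key
 * that carries a checksum therefore needs one extra u64, which is why
 * bch_data_insert_start() below reserves 3 + (op->csum ? 1 : 0) u64s
 * per key when reallocating the keylist.
 */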

/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, might already be waiting on
	 * another journal write - can't wait on more than one journal write at
	 * a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done) {
		continue_at(cl, bch_data_insert_start, op->wq);
		return;
	}

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}
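
/*
 * Ordering note: the keys are journalled first (bch_journal() above) and
 * only then inserted into the btree; a replace operation skips the journal
 * entirely. -ESRCH from bch_btree_insert() means the key we were trying to
 * replace changed underneath us, which is recorded as replace_collision
 * rather than treated as an error.
 */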

static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to insert
	 * are bigger than an empty write: if we just return -ENOMEM here,
	 * bio_insert() and bio_invalidate() will insert the keys created so far
	 * and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}

static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector	+= sectors;
		bio->bi_iter.bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}
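
/*
 * A KEY() built this way has KEY_PTRS == 0; inserting a key with no
 * pointers doesn't add data to the cache, it just invalidates whatever the
 * cache previously held for that range. Chunking to 1U << (KEY_SIZE_BITS - 1)
 * keeps each key's size within what the KEY_SIZE field can encode.
 */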

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}
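
/*
 * Note the compaction: once SET_KEY_PTRS(src, 0) drops the pointers,
 * bkey_bytes(src) shrinks, so the memmove() packs the now-smaller keys
 * together and dst trails src. The result is the same kind of pointerless
 * key that bch_data_invalidate() builds directly.
 */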

static void bch_data_insert_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (bio->bi_error) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->error = bio->bi_error;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
}

static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		wake_up_gc(op->c);
	}

	if (op->bypass)
		return bch_data_invalidate(cl);

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c)) {
			continue_at(cl, bch_data_insert_keys, op->wq);
			return;
		}

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
	return;
err:
	/* bch_alloc_sectors() blocks if op->writeback is true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}
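
/*
 * A minimal sketch of how the callers below drive this entry point; the
 * names example_op and example_write are illustrative only. In the real
 * paths, search_alloc() fills in the data_insert_op and
 * cached_dev_write()/flash_dev_make_request() make the closure_call().
 */
#if 0
static void example_write(struct data_insert_op *example_op,
			  struct closure *parent)
{
	/* example_op->bio, ->c and ->inode must already be set up */
	example_op->writeback	= true;	 /* leave the data dirty in cache */
	example_op->bypass	= false; /* true would invalidate instead */

	closure_call(&example_op->cl, bch_data_insert, NULL, parent);
	/* parent continues once the insert (or invalidation) finishes */
}
#endif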

/* Congested? */

unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}
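
/*
 * The return value is a threshold, not a boolean: 0 means "not congested"
 * (or congestion tracking disabled), anything else is a sector count that
 * check_should_bypass() compares against the task's recent sequential I/O
 * ("if (congested && sectors >= congested)"). Subtracting the popcount of
 * a random word appears to just dither the result so the cutoff isn't a
 * hard edge.
 */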

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential	+= bio->bi_iter.bi_size;

	i->last			 = bio_end_sector(bio);
	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
	task->sequential_io	 = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}
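
/*
 * The sequential-I/O detector above: dc->io_hash holds one struct io per
 * recently seen stream, keyed by the sector where that stream last ended.
 * If this bio starts exactly where a tracked stream stopped (within the
 * 5 second window), it extends that stream's byte count; otherwise the LRU
 * entry is recycled for a new stream. A stream whose running size (or the
 * task's EWMA of past streams) reaches dc->sequential_cutoff gets bypassed
 * straight to the backing device.
 */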

/* Cache lookup */

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned		insert_bio_sectors;
	unsigned		recoverable:1;
	unsigned		write:1;
	unsigned		read_dirty_data:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};
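
/*
 * One struct search is allocated per request from the d->c->search mempool
 * (see search_alloc()/search_free() below); everything the read and write
 * paths need lives here, so nothing has to survive on the stack across the
 * asynchronous closure hops.
 */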

static void bch_cache_read_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.error but not the bio's own error,
	 * so it doesn't get counted against the cache device, but we'll still
	 * reread the data from the backing device.
	 */

	if (bio->bi_error)
		s->iop.error = bio->bi_error;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.error = -EINTR;
	}

	bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio())
	 * if the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN) {
		continue_at(cl, cache_lookup, bcache_wq);
		return;
	}

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_error) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.error = bio->bi_error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		generic_end_io_acct(bio_data_dir(s->orig_bio),
				    &s->d->disk->part0, s->start_time);

		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_error = s->iop.error;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio);
	__bio_clone_fast(bio, orig_bio);
	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;

	bio_cnt_set(bio, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio);

	s->orig_bio		= bio;
	s->cache_miss		= NULL;
	s->d			= d;
	s->recoverable		= 1;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->read_dirty_data	= 0;
	s->start_time		= jiffies;

	s->iop.c		= d->c;
	s->iop.bio		= NULL;
	s->iop.inode		= d->id;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->iop.write_prio	= 0;
	s->iop.error		= 0;
	s->iop.flags		= 0;
	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->iop.wq		= bcache_wq;

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio) {
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, s->iop.bio, i)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.error = 0;
		do_bio_hook(s, s->orig_bio);

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_miss, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
	cache_bio->bi_bdev		= miss->bi_bdev;
	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl);
	return ret;
}
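
/*
 * On a clean miss the read is rounded up with up to dc->readahead bytes
 * (clamped so it never runs past the end of the backing device), the whole
 * region is read into cache_bio's bounce pages, and replace_key pins the
 * hole so a racing write can be detected: the later insert only succeeds
 * if the check key is still in place, otherwise it's counted as a
 * replace_collision.
 */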

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw	= WRITE_FLUSH;
			flush->bi_bdev	= bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

		closure_bio_submit(bio, cl);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}
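
/*
 * Summary of the three branches above - in every case bch_data_insert()
 * also runs, but with different inputs:
 *
 *   bypass:       data goes to the backing device only; the insert runs
 *                 with op->bypass set, so it just invalidates the range.
 *   writeback:    data goes to the cache only, marked dirty; the backing
 *                 device is written later by the writeback thread.
 *   writethrough: data goes to the backing device now, and a clone of the
 *                 bio is inserted into the cache as clean data.
 */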

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

static blk_qc_t cached_dev_make_request(struct request_queue *q,
					struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int rw = bio_data_dir(bio);

	generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);

	bio->bi_bdev = dc->bdev;
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio);
		else
			generic_make_request(bio);
	}

	return BLK_QC_T_NONE;
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}

/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}
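
/*
 * Flash-only volumes have no backing device, so a "cache miss" is simply a
 * hole in the data: the swap()/zero_fill_bio()/swap() dance temporarily
 * shrinks the bio to the missing range, zeroes it, and then advances past it.
 */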

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static blk_qc_t flash_dev_make_request(struct request_queue *q,
					     struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int rw = bio_data_dir(bio);

	generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return BLK_QC_T_NONE;
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass		= (bio->bi_rw & REQ_DISCARD) != 0;
		s->iop.writeback	= true;
		s->iop.bio		= bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
	return BLK_QC_T_NONE;
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}

void bch_request_exit(void)
{
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}