v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Main bcache entry point - handle a read or a write request and decide what to
   4 * do with it; the make_request functions are called by the block layer.
   5 *
   6 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
   7 * Copyright 2012 Google, Inc.
   8 */
   9
  10#include "bcache.h"
  11#include "btree.h"
  12#include "debug.h"
  13#include "request.h"
  14#include "writeback.h"
  15
  16#include <linux/module.h>
  17#include <linux/hash.h>
  18#include <linux/random.h>
  19#include <linux/backing-dev.h>
  20
  21#include <trace/events/bcache.h>
  22
  23#define CUTOFF_CACHE_ADD	95
  24#define CUTOFF_CACHE_READA	90
  25
  26struct kmem_cache *bch_search_cache;
  27
  28static void bch_data_insert_start(struct closure *cl);
  29
  30static unsigned int cache_mode(struct cached_dev *dc)
  31{
  32	return BDEV_CACHE_MODE(&dc->sb);
  33}
  34
  35static bool verify(struct cached_dev *dc)
  36{
  37	return dc->verify;
  38}
  39
  40static void bio_csum(struct bio *bio, struct bkey *k)
  41{
  42	struct bio_vec bv;
  43	struct bvec_iter iter;
  44	uint64_t csum = 0;
  45
  46	bio_for_each_segment(bv, bio, iter) {
  47		void *d = kmap(bv.bv_page) + bv.bv_offset;
  48
  49		csum = bch_crc64_update(csum, d, bv.bv_len);
  50		kunmap(bv.bv_page);
  51	}
  52
  53	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
  54}
  55
  56/* Insert data into cache */
  57
  58static void bch_data_insert_keys(struct closure *cl)
  59{
  60	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
  61	atomic_t *journal_ref = NULL;
  62	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
  63	int ret;
  64
  65	/*
  66	 * If we're looping, might already be waiting on
  67	 * another journal write - can't wait on more than one journal write at
  68	 * a time
  69	 *
  70	 * XXX: this looks wrong
  71	 */
  72#if 0
  73	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
  74		closure_sync(&s->cl);
  75#endif
  76
  77	if (!op->replace)
  78		journal_ref = bch_journal(op->c, &op->insert_keys,
  79					  op->flush_journal ? cl : NULL);
  80
  81	ret = bch_btree_insert(op->c, &op->insert_keys,
  82			       journal_ref, replace_key);
  83	if (ret == -ESRCH) {
  84		op->replace_collision = true;
  85	} else if (ret) {
  86		op->status		= BLK_STS_RESOURCE;
  87		op->insert_data_done	= true;
  88	}
  89
  90	if (journal_ref)
  91		atomic_dec_bug(journal_ref);
  92
  93	if (!op->insert_data_done) {
  94		continue_at(cl, bch_data_insert_start, op->wq);
  95		return;
  96	}
  97
  98	bch_keylist_free(&op->insert_keys);
  99	closure_return(cl);
 100}
 101
 102static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
 103			       struct cache_set *c)
 104{
 105	size_t oldsize = bch_keylist_nkeys(l);
 106	size_t newsize = oldsize + u64s;
 107
 108	/*
  109	 * The journalling code doesn't handle the case where the keys to insert
  110	 * are bigger than an empty write: if we just return -ENOMEM here,
 111	 * bch_data_insert_keys() will insert the keys created so far
 112	 * and finish the rest when the keylist is empty.
 113	 */
 114	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
 115		return -ENOMEM;
 116
 117	return __bch_keylist_realloc(l, u64s);
 118}
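
/*
 * Editor's note (illustrative addition, not part of the kernel file): a
 * minimal userspace model of the cap enforced above - a keylist has to fit
 * in a single empty journal write, i.e. one cache-set block minus the jset
 * header. The 4096-byte block and 64-byte header below are made-up example
 * values, not the real block_bytes(c) or sizeof(struct jset).
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int keylist_would_fit(size_t current_u64s, size_t extra_u64s,
			     size_t block_bytes, size_t jset_header_bytes)
{
	size_t newsize = current_u64s + extra_u64s;

	/* mirrors the "newsize * sizeof(uint64_t) > ..." check above */
	return newsize * sizeof(uint64_t) <= block_bytes - jset_header_bytes;
}

int main(void)
{
	printf("fits: %d\n", keylist_would_fit(400, 16, 4096, 64));
	return 0;
}
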
 119
 120static void bch_data_invalidate(struct closure *cl)
 121{
 122	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 123	struct bio *bio = op->bio;
 124
 125	pr_debug("invalidating %i sectors from %llu",
 126		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 127
 128	while (bio_sectors(bio)) {
 129		unsigned int sectors = min(bio_sectors(bio),
 130				       1U << (KEY_SIZE_BITS - 1));
 131
 132		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
 133			goto out;
 134
 135		bio->bi_iter.bi_sector	+= sectors;
 136		bio->bi_iter.bi_size	-= sectors << 9;
 137
 138		bch_keylist_add(&op->insert_keys,
 139				&KEY(op->inode,
 140				     bio->bi_iter.bi_sector,
 141				     sectors));
 142	}
 143
 144	op->insert_data_done = true;
  145	/* the matching bio_get() is in bch_data_insert() */
 146	bio_put(bio);
 147out:
 148	continue_at(cl, bch_data_insert_keys, op->wq);
 149}
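
/*
 * Editor's note (illustrative addition): a userspace sketch of the chunking
 * loop above. A bcache key records (inode, end sector, length), so the range
 * is advanced first and the key is emitted with the new (end) sector.
 * KEY_SIZE_BITS is assumed to be 16 here, giving 32768-sector chunks; the
 * values in main() are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define KEY_SIZE_BITS	16
#define MAX_KEY_SECTORS	(1U << (KEY_SIZE_BITS - 1))

static void emit_invalidate_keys(uint64_t inode, uint64_t sector,
				 uint32_t nr_sectors)
{
	while (nr_sectors) {
		uint32_t len = nr_sectors < MAX_KEY_SECTORS ?
			       nr_sectors : MAX_KEY_SECTORS;

		sector     += len;
		nr_sectors -= len;

		/* a zero-pointer key: "this range is no longer cached" */
		printf("KEY(inode=%llu, end=%llu, len=%u)\n",
		       (unsigned long long)inode,
		       (unsigned long long)sector, len);
	}
}

int main(void)
{
	emit_invalidate_keys(1, 1000, 100000);
	return 0;
}
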
 150
 151static void bch_data_insert_error(struct closure *cl)
 152{
 153	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 154
 155	/*
 156	 * Our data write just errored, which means we've got a bunch of keys to
 157	 * insert that point to data that wasn't successfully written.
 158	 *
 159	 * We don't have to insert those keys but we still have to invalidate
 160	 * that region of the cache - so, if we just strip off all the pointers
 161	 * from the keys we'll accomplish just that.
 162	 */
 163
 164	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
 165
 166	while (src != op->insert_keys.top) {
 167		struct bkey *n = bkey_next(src);
 168
 169		SET_KEY_PTRS(src, 0);
 170		memmove(dst, src, bkey_bytes(src));
 171
 172		dst = bkey_next(dst);
 173		src = n;
 174	}
 175
 176	op->insert_keys.top = dst;
 177
 178	bch_data_insert_keys(cl);
 179}
 180
 181static void bch_data_insert_endio(struct bio *bio)
 182{
 183	struct closure *cl = bio->bi_private;
 184	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 185
 186	if (bio->bi_status) {
 187		/* TODO: We could try to recover from this. */
 188		if (op->writeback)
 189			op->status = bio->bi_status;
 190		else if (!op->replace)
 191			set_closure_fn(cl, bch_data_insert_error, op->wq);
 192		else
 193			set_closure_fn(cl, NULL, NULL);
 194	}
 195
 196	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
 197}
 198
 199static void bch_data_insert_start(struct closure *cl)
 200{
 201	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 202	struct bio *bio = op->bio, *n;
 203
 204	if (op->bypass)
 205		return bch_data_invalidate(cl);
 206
 207	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
 208		wake_up_gc(op->c);
 209
 210	/*
 211	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
 212	 * flush, it'll wait on the journal write.
 213	 */
 214	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
 215
 216	do {
 217		unsigned int i;
 218		struct bkey *k;
 219		struct bio_set *split = &op->c->bio_split;
 220
  221		/* 1 for the device pointer and 1 for the checksum */
 222		if (bch_keylist_realloc(&op->insert_keys,
 223					3 + (op->csum ? 1 : 0),
 224					op->c)) {
 225			continue_at(cl, bch_data_insert_keys, op->wq);
 226			return;
 227		}
 228
 229		k = op->insert_keys.top;
 230		bkey_init(k);
 231		SET_KEY_INODE(k, op->inode);
 232		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
 233
 234		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
 235				       op->write_point, op->write_prio,
 236				       op->writeback))
 237			goto err;
 238
 239		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 240
 241		n->bi_end_io	= bch_data_insert_endio;
 242		n->bi_private	= cl;
 243
 244		if (op->writeback) {
 245			SET_KEY_DIRTY(k, true);
 246
 247			for (i = 0; i < KEY_PTRS(k); i++)
 248				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
 249					    GC_MARK_DIRTY);
 250		}
 251
 252		SET_KEY_CSUM(k, op->csum);
 253		if (KEY_CSUM(k))
 254			bio_csum(n, k);
 255
 256		trace_bcache_cache_insert(k);
 257		bch_keylist_push(&op->insert_keys);
 258
 259		bio_set_op_attrs(n, REQ_OP_WRITE, 0);
 260		bch_submit_bbio(n, op->c, k, 0);
 261	} while (n != bio);
 262
 263	op->insert_data_done = true;
 264	continue_at(cl, bch_data_insert_keys, op->wq);
 265	return;
 266err:
  267	/* bch_alloc_sectors() blocks if op->writeback is true */
 268	BUG_ON(op->writeback);
 269
 270	/*
 271	 * But if it's not a writeback write we'd rather just bail out if
 272	 * there aren't any buckets ready to write to - it might take awhile and
 273	 * we might be starving btree writes for gc or something.
 274	 */
 275
 276	if (!op->replace) {
 277		/*
 278		 * Writethrough write: We can't complete the write until we've
 279		 * updated the index. But we don't want to delay the write while
 280		 * we wait for buckets to be freed up, so just invalidate the
 281		 * rest of the write.
 282		 */
 283		op->bypass = true;
 284		return bch_data_invalidate(cl);
 285	} else {
 286		/*
 287		 * From a cache miss, we can just insert the keys for the data
 288		 * we have written or bail out if we didn't do anything.
 289		 */
 290		op->insert_data_done = true;
 291		bio_put(bio);
 292
 293		if (!bch_keylist_empty(&op->insert_keys))
 294			continue_at(cl, bch_data_insert_keys, op->wq);
 295		else
 296			closure_return(cl);
 297	}
 298}
 299
 300/**
 301 * bch_data_insert - stick some data in the cache
 302 * @cl: closure pointer.
 303 *
 304 * This is the starting point for any data to end up in a cache device; it could
 305 * be from a normal write, or a writeback write, or a write to a flash only
 306 * volume - it's also used by the moving garbage collector to compact data in
 307 * mostly empty buckets.
 308 *
 309 * It first writes the data to the cache, creating a list of keys to be inserted
 310 * (if the data had to be fragmented there will be multiple keys); after the
 311 * data is written it calls bch_journal, and after the keys have been added to
 312 * the next journal write they're inserted into the btree.
 313 *
 314 * It inserts the data in op->bio; bi_sector is used for the key offset,
 315 * and op->inode is used for the key inode.
 316 *
 317 * If op->bypass is true, instead of inserting the data it invalidates the
 318 * region of the cache represented by op->bio and op->inode.
 319 */
 320void bch_data_insert(struct closure *cl)
 321{
 322	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 323
 324	trace_bcache_write(op->c, op->inode, op->bio,
 325			   op->writeback, op->bypass);
 326
 327	bch_keylist_init(&op->insert_keys);
 328	bio_get(op->bio);
 329	bch_data_insert_start(cl);
 330}
 331
 332/*
 333 * Congested?  Return 0 (not congested) or the limit (in sectors)
 334 * beyond which we should bypass the cache due to congestion.
 335 */
 336unsigned int bch_get_congested(const struct cache_set *c)
 337{
  338	int i;
 339
 340	if (!c->congested_read_threshold_us &&
 341	    !c->congested_write_threshold_us)
 342		return 0;
 343
 344	i = (local_clock_us() - c->congested_last_us) / 1024;
 345	if (i < 0)
 346		return 0;
 347
 348	i += atomic_read(&c->congested);
 349	if (i >= 0)
 350		return 0;
 351
 352	i += CONGESTED_MAX;
 353
 354	if (i > 0)
 355		i = fract_exp_two(i, 6);
 356
  357	i -= hweight32(get_random_u32());
 358
 359	return i > 0 ? i : 1;
 360}
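
/*
 * Editor's note (illustrative addition, not part of the kernel file): a
 * minimal userspace model of the congestion arithmetic above. exp2() stands
 * in for bcache's fract_exp_two(i, 6), __builtin_popcount() for hweight32(),
 * and CONGESTED_MAX is assumed to be 1024 as in bcache.h.
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CONGESTED_MAX	1024

/*
 * elapsed_us: microseconds since the last congestion sample;
 * congested:  the running counter, negative while the cache is congested.
 * Returns 0 (not congested) or a limit in sectors beyond which to bypass.
 */
static unsigned int model_get_congested(int64_t elapsed_us, int congested)
{
	int i = (int)(elapsed_us / 1024);

	if (i < 0)
		return 0;

	i += congested;
	if (i >= 0)
		return 0;	/* counter already drained: not congested */

	i += CONGESTED_MAX;
	if (i > 0)
		i = (int)exp2((double)i / 64.0);  /* ~ fract_exp_two(i, 6) */

	i -= __builtin_popcount((unsigned int)rand());	/* random jitter */

	return i > 0 ? (unsigned int)i : 1;
}

int main(void)
{
	printf("bypass above %u sectors\n", model_get_congested(0, -600));
	return 0;
}
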
 361
 362static void add_sequential(struct task_struct *t)
 363{
 364	ewma_add(t->sequential_io_avg,
 365		 t->sequential_io, 8, 0);
 366
 367	t->sequential_io = 0;
 368}
 369
 370static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
 371{
 372	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
 373}
 374
 375static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 376{
 377	struct cache_set *c = dc->disk.c;
 378	unsigned int mode = cache_mode(dc);
 379	unsigned int sectors, congested;
 380	struct task_struct *task = current;
 381	struct io *i;
 382
 383	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
 384	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
 385	    (bio_op(bio) == REQ_OP_DISCARD))
 386		goto skip;
 387
 388	if (mode == CACHE_MODE_NONE ||
 389	    (mode == CACHE_MODE_WRITEAROUND &&
 390	     op_is_write(bio_op(bio))))
 391		goto skip;
 392
 393	/*
 394	 * Flag for bypass if the IO is for read-ahead or background,
 395	 * unless the read-ahead request is for metadata
 396	 * (eg, for gfs2 or xfs).
 397	 */
 398	if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
 399	    !(bio->bi_opf & (REQ_META|REQ_PRIO)))
 400		goto skip;
 401
 402	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
 403	    bio_sectors(bio) & (c->sb.block_size - 1)) {
 404		pr_debug("skipping unaligned io");
 405		goto skip;
 406	}
 407
 408	if (bypass_torture_test(dc)) {
 409		if ((get_random_int() & 3) == 3)
 410			goto skip;
 411		else
 412			goto rescale;
 413	}
 414
 415	congested = bch_get_congested(c);
 416	if (!congested && !dc->sequential_cutoff)
 417		goto rescale;
 418
 419	spin_lock(&dc->io_lock);
 420
 421	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
 422		if (i->last == bio->bi_iter.bi_sector &&
 423		    time_before(jiffies, i->jiffies))
 424			goto found;
 425
 426	i = list_first_entry(&dc->io_lru, struct io, lru);
 427
 428	add_sequential(task);
 429	i->sequential = 0;
 430found:
 431	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
 432		i->sequential	+= bio->bi_iter.bi_size;
 433
 434	i->last			 = bio_end_sector(bio);
 435	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
 436	task->sequential_io	 = i->sequential;
 437
 438	hlist_del(&i->hash);
 439	hlist_add_head(&i->hash, iohash(dc, i->last));
 440	list_move_tail(&i->lru, &dc->io_lru);
 441
 442	spin_unlock(&dc->io_lock);
 443
 444	sectors = max(task->sequential_io,
 445		      task->sequential_io_avg) >> 9;
 446
 447	if (dc->sequential_cutoff &&
 448	    sectors >= dc->sequential_cutoff >> 9) {
 449		trace_bcache_bypass_sequential(bio);
 450		goto skip;
 451	}
 452
 453	if (congested && sectors >= congested) {
 454		trace_bcache_bypass_congested(bio);
 455		goto skip;
 456	}
 457
 458rescale:
 459	bch_rescale_priorities(c, bio_sectors(bio));
 460	return false;
 461skip:
 462	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
 463	return true;
 464}
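
/*
 * Editor's note (illustrative addition): a self-contained userspace model of
 * the sequential-I/O detection above. It tracks a single stream, whereas the
 * kernel keeps a small hash plus LRU of recent I/O tails, and it uses the
 * same 8-sample EWMA idea as ewma_add(t->sequential_io_avg, ...). The struct,
 * function names and the sizes in main() are invented for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct stream {
	uint64_t last_sector;	/* end sector of the previous request */
	uint64_t sequential;	/* bytes seen back-to-back so far */
	uint64_t avg;		/* EWMA of completed sequential runs */
};

static bool model_should_bypass(struct stream *s, uint64_t sector,
				uint32_t bytes, uint64_t cutoff_bytes)
{
	if (sector != s->last_sector) {
		/* stream broken: fold the finished run into the average */
		s->avg = (s->avg * 7 + s->sequential) / 8;
		s->sequential = 0;
	}

	s->sequential += bytes;
	s->last_sector = sector + (bytes >> 9);

	/* compare the larger of current run and average to the cutoff */
	uint64_t run = s->sequential > s->avg ? s->sequential : s->avg;

	return cutoff_bytes && run >= cutoff_bytes;
}

int main(void)
{
	struct stream s = { 0 };
	uint64_t sector = 0;

	/* ten back-to-back 1 MiB writes with a 4 MiB sequential cutoff */
	for (int i = 0; i < 10; i++) {
		bool bypass = model_should_bypass(&s, sector, 1 << 20, 4 << 20);

		printf("req %d at sector %8llu: %s\n", i,
		       (unsigned long long)sector, bypass ? "bypass" : "cache");
		sector += (1 << 20) >> 9;
	}
	return 0;
}
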
 465
 466/* Cache lookup */
 467
 468struct search {
 469	/* Stack frame for bio_complete */
 470	struct closure		cl;
 471
 472	struct bbio		bio;
 473	struct bio		*orig_bio;
 474	struct bio		*cache_miss;
 475	struct bcache_device	*d;
 476
 477	unsigned int		insert_bio_sectors;
 478	unsigned int		recoverable:1;
 479	unsigned int		write:1;
 480	unsigned int		read_dirty_data:1;
 481	unsigned int		cache_missed:1;
 482
 483	unsigned long		start_time;
 484
 485	struct btree_op		op;
 486	struct data_insert_op	iop;
 487};
 488
 489static void bch_cache_read_endio(struct bio *bio)
 490{
 491	struct bbio *b = container_of(bio, struct bbio, bio);
 492	struct closure *cl = bio->bi_private;
 493	struct search *s = container_of(cl, struct search, cl);
 494
 495	/*
 496	 * If the bucket was reused while our bio was in flight, we might have
  497	 * read the wrong data. Set s->iop.status but leave the bio's own
  498	 * status untouched so it doesn't get counted against the cache
  499	 * device; we'll still reread the data from the backing device.
 500	 */
 501
 502	if (bio->bi_status)
 503		s->iop.status = bio->bi_status;
 504	else if (!KEY_DIRTY(&b->key) &&
 505		 ptr_stale(s->iop.c, &b->key, 0)) {
 506		atomic_long_inc(&s->iop.c->cache_read_races);
 507		s->iop.status = BLK_STS_IOERR;
 508	}
 509
 510	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
 511}
 512
 513/*
 514 * Read from a single key, handling the initial cache miss if the key starts in
 515 * the middle of the bio
 516 */
 517static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 518{
 519	struct search *s = container_of(op, struct search, op);
 520	struct bio *n, *bio = &s->bio.bio;
 521	struct bkey *bio_key;
 522	unsigned int ptr;
 523
 524	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
 525		return MAP_CONTINUE;
 526
 527	if (KEY_INODE(k) != s->iop.inode ||
 528	    KEY_START(k) > bio->bi_iter.bi_sector) {
 529		unsigned int bio_sectors = bio_sectors(bio);
 530		unsigned int sectors = KEY_INODE(k) == s->iop.inode
 531			? min_t(uint64_t, INT_MAX,
 532				KEY_START(k) - bio->bi_iter.bi_sector)
 533			: INT_MAX;
 534		int ret = s->d->cache_miss(b, s, bio, sectors);
  535
 536		if (ret != MAP_CONTINUE)
 537			return ret;
 538
 539		/* if this was a complete miss we shouldn't get here */
 540		BUG_ON(bio_sectors <= sectors);
 541	}
 542
 543	if (!KEY_SIZE(k))
 544		return MAP_CONTINUE;
 545
 546	/* XXX: figure out best pointer - for multiple cache devices */
 547	ptr = 0;
 548
 549	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
 550
 551	if (KEY_DIRTY(k))
 552		s->read_dirty_data = true;
 553
 554	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
 555				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
 556			   GFP_NOIO, &s->d->bio_split);
 557
 558	bio_key = &container_of(n, struct bbio, bio)->key;
 559	bch_bkey_copy_single_ptr(bio_key, k, ptr);
 560
 561	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
 562	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
 563
 564	n->bi_end_io	= bch_cache_read_endio;
 565	n->bi_private	= &s->cl;
 566
 567	/*
 568	 * The bucket we're reading from might be reused while our bio
 569	 * is in flight, and we could then end up reading the wrong
 570	 * data.
 571	 *
 572	 * We guard against this by checking (in cache_read_endio()) if
 573	 * the pointer is stale again; if so, we treat it as an error
 574	 * and reread from the backing device (but we don't pass that
 575	 * error up anywhere).
 576	 */
 577
 578	__bch_submit_bbio(n, b->c);
 579	return n == bio ? MAP_DONE : MAP_CONTINUE;
 580}
 581
 582static void cache_lookup(struct closure *cl)
 583{
 584	struct search *s = container_of(cl, struct search, iop.cl);
 585	struct bio *bio = &s->bio.bio;
 586	struct cached_dev *dc;
 587	int ret;
 588
 589	bch_btree_op_init(&s->op, -1);
 590
 591	ret = bch_btree_map_keys(&s->op, s->iop.c,
 592				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
 593				 cache_lookup_fn, MAP_END_KEY);
 594	if (ret == -EAGAIN) {
 595		continue_at(cl, cache_lookup, bcache_wq);
 596		return;
 597	}
 598
 599	/*
  600	 * We might hit an error while searching the btree. If that happens we
  601	 * get a negative ret; in that case we must not recover data from the
  602	 * backing device (when the cache device is dirty) because we don't know
  603	 * whether all the bkeys the read request covered are clean.
  604	 *
  605	 * If that happens, s->iop.status still has its initial value from
  606	 * before we submitted s->bio.bio.
 607	 */
 608	if (ret < 0) {
 609		BUG_ON(ret == -EINTR);
 610		if (s->d && s->d->c &&
 611				!UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
 612			dc = container_of(s->d, struct cached_dev, disk);
 613			if (dc && atomic_read(&dc->has_dirty))
 614				s->recoverable = false;
 615		}
 616		if (!s->iop.status)
 617			s->iop.status = BLK_STS_IOERR;
 618	}
 619
 620	closure_return(cl);
 621}
 622
 623/* Common code for the make_request functions */
 624
 625static void request_endio(struct bio *bio)
 626{
 627	struct closure *cl = bio->bi_private;
 628
 629	if (bio->bi_status) {
 630		struct search *s = container_of(cl, struct search, cl);
 631
 632		s->iop.status = bio->bi_status;
 633		/* Only cache read errors are recoverable */
 634		s->recoverable = false;
 635	}
 636
 637	bio_put(bio);
 638	closure_put(cl);
 639}
 640
 641static void backing_request_endio(struct bio *bio)
 642{
 643	struct closure *cl = bio->bi_private;
 644
 645	if (bio->bi_status) {
 646		struct search *s = container_of(cl, struct search, cl);
 647		struct cached_dev *dc = container_of(s->d,
 648						     struct cached_dev, disk);
 649		/*
  650		 * If a bio has REQ_PREFLUSH for writeback mode, it is
  651		 * specially assembled in cached_dev_write() for a non-empty
  652		 * write request that also carries REQ_PREFLUSH. We don't set
  653		 * s->iop.status on this failure; the status will be decided
  654		 * by the result of the bch_data_insert() operation.
 655		 */
 656		if (unlikely(s->iop.writeback &&
 657			     bio->bi_opf & REQ_PREFLUSH)) {
 658			pr_err("Can't flush %s: returned bi_status %i",
 659				dc->backing_dev_name, bio->bi_status);
 660		} else {
 661			/* set to orig_bio->bi_status in bio_complete() */
 662			s->iop.status = bio->bi_status;
 663		}
 664		s->recoverable = false;
 665		/* should count I/O error for backing device here */
 666		bch_count_backing_io_errors(dc, bio);
 667	}
 668
 669	bio_put(bio);
 670	closure_put(cl);
 671}
 672
 673static void bio_complete(struct search *s)
 674{
 675	if (s->orig_bio) {
  676		generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
 677				    &s->d->disk->part0, s->start_time);
 678
 679		trace_bcache_request_end(s->d, s->orig_bio);
 680		s->orig_bio->bi_status = s->iop.status;
 681		bio_endio(s->orig_bio);
 682		s->orig_bio = NULL;
 683	}
 684}
 685
 686static void do_bio_hook(struct search *s,
 687			struct bio *orig_bio,
 688			bio_end_io_t *end_io_fn)
 689{
 690	struct bio *bio = &s->bio.bio;
 691
 692	bio_init(bio, NULL, 0);
 693	__bio_clone_fast(bio, orig_bio);
 694	/*
 695	 * bi_end_io can be set separately somewhere else, e.g. the
 696	 * variants in,
 697	 * - cache_bio->bi_end_io from cached_dev_cache_miss()
 698	 * - n->bi_end_io from cache_lookup_fn()
 699	 */
 700	bio->bi_end_io		= end_io_fn;
 701	bio->bi_private		= &s->cl;
 702
 703	bio_cnt_set(bio, 3);
 704}
 705
 706static void search_free(struct closure *cl)
 707{
 708	struct search *s = container_of(cl, struct search, cl);
 709
 710	atomic_dec(&s->iop.c->search_inflight);
 711
 712	if (s->iop.bio)
 713		bio_put(s->iop.bio);
 714
 715	bio_complete(s);
 716	closure_debug_destroy(cl);
 717	mempool_free(s, &s->iop.c->search);
 718}
 719
 720static inline struct search *search_alloc(struct bio *bio,
 721					  struct bcache_device *d)
 722{
 723	struct search *s;
 724
 725	s = mempool_alloc(&d->c->search, GFP_NOIO);
 726
 727	closure_init(&s->cl, NULL);
 728	do_bio_hook(s, bio, request_endio);
 729	atomic_inc(&d->c->search_inflight);
 730
 731	s->orig_bio		= bio;
 732	s->cache_miss		= NULL;
 733	s->cache_missed		= 0;
 734	s->d			= d;
 735	s->recoverable		= 1;
 736	s->write		= op_is_write(bio_op(bio));
 737	s->read_dirty_data	= 0;
 738	s->start_time		= jiffies;
 739
 740	s->iop.c		= d->c;
 741	s->iop.bio		= NULL;
 742	s->iop.inode		= d->id;
 743	s->iop.write_point	= hash_long((unsigned long) current, 16);
 744	s->iop.write_prio	= 0;
 745	s->iop.status		= 0;
 746	s->iop.flags		= 0;
 747	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
 748	s->iop.wq		= bcache_wq;
 749
 750	return s;
 751}
 752
 753/* Cached devices */
 754
 755static void cached_dev_bio_complete(struct closure *cl)
 756{
 757	struct search *s = container_of(cl, struct search, cl);
 758	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 759
 760	cached_dev_put(dc);
  761	search_free(cl);
 762}
 763
 764/* Process reads */
 765
 766static void cached_dev_read_error_done(struct closure *cl)
 767{
 768	struct search *s = container_of(cl, struct search, cl);
 769
 770	if (s->iop.replace_collision)
 771		bch_mark_cache_miss_collision(s->iop.c, s->d);
 772
 773	if (s->iop.bio)
 774		bio_free_pages(s->iop.bio);
 775
 776	cached_dev_bio_complete(cl);
 777}
 778
 779static void cached_dev_read_error(struct closure *cl)
 780{
 781	struct search *s = container_of(cl, struct search, cl);
 782	struct bio *bio = &s->bio.bio;
 783
 784	/*
  785	 * If the read request hit dirty data (s->read_dirty_data is true),
  786	 * then retrying a failed read from the backing device may return
  787	 * stale data. So read-failure recovery is only permitted when the
  788	 * read request hit clean data in the cache device, or when a cache
  789	 * read race happened.
 790	 */
 791	if (s->recoverable && !s->read_dirty_data) {
 792		/* Retry from the backing device: */
 793		trace_bcache_read_retry(s->orig_bio);
 794
 795		s->iop.status = 0;
 796		do_bio_hook(s, s->orig_bio, backing_request_endio);
 797
 798		/* XXX: invalidate cache */
 799
 800		/* I/O request sent to backing device */
 801		closure_bio_submit(s->iop.c, bio, cl);
 802	}
 803
 804	continue_at(cl, cached_dev_read_error_done, NULL);
 805}
 806
 807static void cached_dev_cache_miss_done(struct closure *cl)
 808{
 809	struct search *s = container_of(cl, struct search, cl);
 810	struct bcache_device *d = s->d;
 811
 812	if (s->iop.replace_collision)
 813		bch_mark_cache_miss_collision(s->iop.c, s->d);
 814
 815	if (s->iop.bio)
 816		bio_free_pages(s->iop.bio);
 817
 818	cached_dev_bio_complete(cl);
 819	closure_put(&d->cl);
 820}
 821
 822static void cached_dev_read_done(struct closure *cl)
 823{
 824	struct search *s = container_of(cl, struct search, cl);
 825	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 826
 827	/*
 828	 * We had a cache miss; cache_bio now contains data ready to be inserted
 829	 * into the cache.
 830	 *
 831	 * First, we copy the data we just read from cache_bio's bounce buffers
 832	 * to the buffers the original bio pointed to:
 833	 */
 834
 835	if (s->iop.bio) {
 836		bio_reset(s->iop.bio);
 837		s->iop.bio->bi_iter.bi_sector =
 838			s->cache_miss->bi_iter.bi_sector;
 839		bio_copy_dev(s->iop.bio, s->cache_miss);
 840		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 841		bch_bio_map(s->iop.bio, NULL);
 842
 843		bio_copy_data(s->cache_miss, s->iop.bio);
 844
 845		bio_put(s->cache_miss);
 846		s->cache_miss = NULL;
 847	}
 848
 849	if (verify(dc) && s->recoverable && !s->read_dirty_data)
 850		bch_data_verify(dc, s->orig_bio);
 851
 852	closure_get(&dc->disk.cl);
 853	bio_complete(s);
 854
 855	if (s->iop.bio &&
 856	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
 857		BUG_ON(!s->iop.replace);
 858		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
 859	}
 860
 861	continue_at(cl, cached_dev_cache_miss_done, NULL);
 862}
 863
 864static void cached_dev_read_done_bh(struct closure *cl)
 865{
 866	struct search *s = container_of(cl, struct search, cl);
 867	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 868
 869	bch_mark_cache_accounting(s->iop.c, s->d,
 870				  !s->cache_missed, s->iop.bypass);
 871	trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);
 872
 873	if (s->iop.status)
 874		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
 875	else if (s->iop.bio || verify(dc))
 876		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
 877	else
 878		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
 879}
 880
 881static int cached_dev_cache_miss(struct btree *b, struct search *s,
 882				 struct bio *bio, unsigned int sectors)
 883{
 884	int ret = MAP_CONTINUE;
 885	unsigned int reada = 0;
 886	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 887	struct bio *miss, *cache_bio;
 888
 889	s->cache_missed = 1;
 890
 891	if (s->cache_miss || s->iop.bypass) {
 892		miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
 893		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
 894		goto out_submit;
 895	}
 896
 897	if (!(bio->bi_opf & REQ_RAHEAD) &&
 898	    !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
 899	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
 900		reada = min_t(sector_t, dc->readahead >> 9,
 901			      get_capacity(bio->bi_disk) - bio_end_sector(bio));
 902
 903	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 904
 905	s->iop.replace_key = KEY(s->iop.inode,
 906				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
 907				 s->insert_bio_sectors);
 908
 909	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
 910	if (ret)
 911		return ret;
 912
 913	s->iop.replace = true;
 914
 915	miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
 916
 917	/* btree_search_recurse()'s btree iterator is no good anymore */
 918	ret = miss == bio ? MAP_DONE : -EINTR;
 919
 920	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
 921			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
 922			&dc->disk.bio_split);
 923	if (!cache_bio)
 924		goto out_submit;
 925
 926	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
 927	bio_copy_dev(cache_bio, miss);
 928	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;
 929
 930	cache_bio->bi_end_io	= backing_request_endio;
 931	cache_bio->bi_private	= &s->cl;
 932
 933	bch_bio_map(cache_bio, NULL);
 934	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
 935		goto out_put;
 936
 937	if (reada)
 938		bch_mark_cache_readahead(s->iop.c, s->d);
 939
 940	s->cache_miss	= miss;
 941	s->iop.bio	= cache_bio;
 942	bio_get(cache_bio);
 943	/* I/O request sent to backing device */
 944	closure_bio_submit(s->iop.c, cache_bio, &s->cl);
 945
 946	return ret;
 947out_put:
 948	bio_put(cache_bio);
 949out_submit:
 950	miss->bi_end_io		= backing_request_endio;
 951	miss->bi_private	= &s->cl;
 952	/* I/O request sent to backing device */
 953	closure_bio_submit(s->iop.c, miss, &s->cl);
 954	return ret;
 955}
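
/*
 * Editor's note (illustrative addition): a sketch of the sizing arithmetic
 * above. The amount read into the cache on a miss is the original bio plus
 * optional readahead, clamped both to the end of the backing device and to
 * the miss size passed in. Function and parameter names are invented; the
 * numbers in main() are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t miss_insert_sectors(uint32_t miss_sectors,
				    uint32_t bio_sectors,
				    uint32_t readahead_sectors,
				    uint64_t device_capacity,
				    uint64_t bio_end_sector)
{
	uint64_t room = device_capacity > bio_end_sector ?
			device_capacity - bio_end_sector : 0;
	uint32_t reada = readahead_sectors < room ? readahead_sectors
						  : (uint32_t)room;
	uint32_t want = bio_sectors + reada;

	return miss_sectors < want ? miss_sectors : want;
}

int main(void)
{
	/* 64-sector miss, 32-sector bio, up to 128 sectors of readahead */
	printf("%u sectors inserted\n",
	       miss_insert_sectors(64, 32, 128, 1 << 20, 2048));
	return 0;
}
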
 956
 957static void cached_dev_read(struct cached_dev *dc, struct search *s)
 958{
 959	struct closure *cl = &s->cl;
 960
 961	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
 962	continue_at(cl, cached_dev_read_done_bh, NULL);
 963}
 964
 965/* Process writes */
 966
 967static void cached_dev_write_complete(struct closure *cl)
 968{
 969	struct search *s = container_of(cl, struct search, cl);
 970	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 971
 972	up_read_non_owner(&dc->writeback_lock);
 973	cached_dev_bio_complete(cl);
 974}
 975
 976static void cached_dev_write(struct cached_dev *dc, struct search *s)
 977{
 978	struct closure *cl = &s->cl;
 979	struct bio *bio = &s->bio.bio;
 980	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
 981	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 982
 983	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
 984
 985	down_read_non_owner(&dc->writeback_lock);
 986	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
 987		/*
 988		 * We overlap with some dirty data undergoing background
 989		 * writeback, force this write to writeback
 990		 */
 991		s->iop.bypass = false;
 992		s->iop.writeback = true;
 993	}
 994
 995	/*
 996	 * Discards aren't _required_ to do anything, so skipping if
 997	 * check_overlapping returned true is ok
 998	 *
 999	 * But check_overlapping drops dirty keys for which io hasn't started,
1000	 * so we still want to call it.
1001	 */
1002	if (bio_op(bio) == REQ_OP_DISCARD)
1003		s->iop.bypass = true;
1004
1005	if (should_writeback(dc, s->orig_bio,
1006			     cache_mode(dc),
1007			     s->iop.bypass)) {
1008		s->iop.bypass = false;
1009		s->iop.writeback = true;
1010	}
1011
1012	if (s->iop.bypass) {
1013		s->iop.bio = s->orig_bio;
1014		bio_get(s->iop.bio);
1015
1016		if (bio_op(bio) == REQ_OP_DISCARD &&
1017		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
1018			goto insert_data;
1019
1020		/* I/O request sent to backing device */
1021		bio->bi_end_io = backing_request_endio;
1022		closure_bio_submit(s->iop.c, bio, cl);
1023
1024	} else if (s->iop.writeback) {
1025		bch_writeback_add(dc);
1026		s->iop.bio = bio;
1027
1028		if (bio->bi_opf & REQ_PREFLUSH) {
1029			/*
1030			 * Also need to send a flush to the backing
1031			 * device.
1032			 */
1033			struct bio *flush;
1034
1035			flush = bio_alloc_bioset(GFP_NOIO, 0,
1036						 &dc->disk.bio_split);
1037			if (!flush) {
1038				s->iop.status = BLK_STS_RESOURCE;
1039				goto insert_data;
1040			}
1041			bio_copy_dev(flush, bio);
1042			flush->bi_end_io = backing_request_endio;
1043			flush->bi_private = cl;
1044			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
1045			/* I/O request sent to backing device */
1046			closure_bio_submit(s->iop.c, flush, cl);
1047		}
1048	} else {
1049		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
1050		/* I/O request sent to backing device */
1051		bio->bi_end_io = backing_request_endio;
1052		closure_bio_submit(s->iop.c, bio, cl);
1053	}
1054
1055insert_data:
1056	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1057	continue_at(cl, cached_dev_write_complete, NULL);
1058}
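
/*
 * Editor's note (illustrative addition): the write path above picks one of
 * three routes - bypass (backing device only, cache range invalidated),
 * writeback (cache only) or writethrough (backing device plus cache insert).
 * This tiny userspace model mirrors the flag logic above; the enum and
 * function names are invented for illustration, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

enum write_route { WRITE_BYPASS, WRITE_WRITEBACK, WRITE_WRITETHROUGH };

static enum write_route pick_route(bool bypass, bool overlaps_dirty,
				   bool is_discard, bool should_writeback)
{
	bool writeback = false;

	if (overlaps_dirty) {	/* overlaps keys already being written back */
		bypass = false;
		writeback = true;
	}

	if (is_discard)		/* discards may skip the cache entirely */
		bypass = true;

	if (should_writeback) {	/* e.g. writeback mode with room in the cache */
		bypass = false;
		writeback = true;
	}

	if (bypass)
		return WRITE_BYPASS;
	if (writeback)
		return WRITE_WRITEBACK;
	return WRITE_WRITETHROUGH;
}

int main(void)
{
	/* plain write, nothing special: writethrough (2) */
	printf("%d\n", pick_route(false, false, false, false));
	/* write overlapping in-flight writeback keys: writeback (1) */
	printf("%d\n", pick_route(false, true, false, false));
	return 0;
}
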
1059
1060static void cached_dev_nodata(struct closure *cl)
1061{
1062	struct search *s = container_of(cl, struct search, cl);
1063	struct bio *bio = &s->bio.bio;
1064
1065	if (s->iop.flush_journal)
1066		bch_journal_meta(s->iop.c, cl);
1067
1068	/* If it's a flush, we send the flush to the backing device too */
1069	bio->bi_end_io = backing_request_endio;
1070	closure_bio_submit(s->iop.c, bio, cl);
1071
1072	continue_at(cl, cached_dev_bio_complete, NULL);
1073}
1074
1075struct detached_dev_io_private {
1076	struct bcache_device	*d;
1077	unsigned long		start_time;
1078	bio_end_io_t		*bi_end_io;
1079	void			*bi_private;
1080};
1081
1082static void detached_dev_end_io(struct bio *bio)
1083{
1084	struct detached_dev_io_private *ddip;
1085
1086	ddip = bio->bi_private;
1087	bio->bi_end_io = ddip->bi_end_io;
1088	bio->bi_private = ddip->bi_private;
1089
 1090	generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
1091			    &ddip->d->disk->part0, ddip->start_time);
1092
1093	if (bio->bi_status) {
1094		struct cached_dev *dc = container_of(ddip->d,
1095						     struct cached_dev, disk);
1096		/* should count I/O error for backing device here */
1097		bch_count_backing_io_errors(dc, bio);
1098	}
1099
1100	kfree(ddip);
1101	bio->bi_end_io(bio);
1102}
1103
1104static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
1105{
1106	struct detached_dev_io_private *ddip;
1107	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1108
1109	/*
 1110	 * No need to call closure_get(&dc->disk.cl) here,
 1111	 * because the upper layer has already opened the bcache device,
 1112	 * which already called closure_get(&dc->disk.cl).
1113	 */
1114	ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
1115	ddip->d = d;
1116	ddip->start_time = jiffies;
1117	ddip->bi_end_io = bio->bi_end_io;
1118	ddip->bi_private = bio->bi_private;
1119	bio->bi_end_io = detached_dev_end_io;
1120	bio->bi_private = ddip;
1121
1122	if ((bio_op(bio) == REQ_OP_DISCARD) &&
1123	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
1124		bio->bi_end_io(bio);
1125	else
1126		generic_make_request(bio);
1127}
1128
1129static void quit_max_writeback_rate(struct cache_set *c,
1130				    struct cached_dev *this_dc)
1131{
1132	int i;
1133	struct bcache_device *d;
1134	struct cached_dev *dc;
1135
1136	/*
 1137	 * The mutex bch_register_lock may be contended by other parallel
 1138	 * requesters, or by attach/detach operations on other backing devices.
 1139	 * Waiting for the mutex may add seconds or more of I/O request latency.
 1140	 * To avoid that, if mutex_trylock() fails, only the writeback rate of
 1141	 * the current cached device is set to 1, and __update_writeback_rate()
 1142	 * will decide the writeback rate of the other cached devices (remember
 1143	 * that c->idle_counter is already 0).
1144	 */
1145	if (mutex_trylock(&bch_register_lock)) {
1146		for (i = 0; i < c->devices_max_used; i++) {
1147			if (!c->devices[i])
1148				continue;
1149
1150			if (UUID_FLASH_ONLY(&c->uuids[i]))
1151				continue;
1152
1153			d = c->devices[i];
1154			dc = container_of(d, struct cached_dev, disk);
1155			/*
 1156			 * Set the writeback rate to the default minimum value,
 1157			 * then let update_writeback_rate() decide the
 1158			 * upcoming rate.
1159			 */
1160			atomic_long_set(&dc->writeback_rate.rate, 1);
1161		}
1162		mutex_unlock(&bch_register_lock);
1163	} else
1164		atomic_long_set(&this_dc->writeback_rate.rate, 1);
1165}
1166
1167/* Cached devices - read & write stuff */
1168
1169static blk_qc_t cached_dev_make_request(struct request_queue *q,
1170					struct bio *bio)
1171{
1172	struct search *s;
1173	struct bcache_device *d = bio->bi_disk->private_data;
1174	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1175	int rw = bio_data_dir(bio);
1176
1177	if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
1178		     dc->io_disable)) {
1179		bio->bi_status = BLK_STS_IOERR;
1180		bio_endio(bio);
1181		return BLK_QC_T_NONE;
1182	}
1183
1184	if (likely(d->c)) {
1185		if (atomic_read(&d->c->idle_counter))
1186			atomic_set(&d->c->idle_counter, 0);
1187		/*
1188		 * If at_max_writeback_rate of cache set is true and new I/O
1189		 * comes, quit max writeback rate of all cached devices
1190		 * attached to this cache set, and set at_max_writeback_rate
1191		 * to false.
1192		 */
1193		if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
1194			atomic_set(&d->c->at_max_writeback_rate, 0);
1195			quit_max_writeback_rate(d->c, dc);
1196		}
1197	}
1198
1199	generic_start_io_acct(q,
1200			      bio_op(bio),
1201			      bio_sectors(bio),
1202			      &d->disk->part0);
1203
1204	bio_set_dev(bio, dc->bdev);
1205	bio->bi_iter.bi_sector += dc->sb.data_offset;
1206
1207	if (cached_dev_get(dc)) {
1208		s = search_alloc(bio, d);
1209		trace_bcache_request_start(s->d, bio);
1210
1211		if (!bio->bi_iter.bi_size) {
1212			/*
1213			 * can't call bch_journal_meta from under
1214			 * generic_make_request
1215			 */
1216			continue_at_nobarrier(&s->cl,
1217					      cached_dev_nodata,
1218					      bcache_wq);
1219		} else {
1220			s->iop.bypass = check_should_bypass(dc, bio);
1221
1222			if (rw)
1223				cached_dev_write(dc, s);
1224			else
1225				cached_dev_read(dc, s);
1226		}
1227	} else
1228		/* I/O request sent to backing device */
1229		detached_dev_do_request(d, bio);
1230
1231	return BLK_QC_T_NONE;
1232}
1233
1234static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
1235			    unsigned int cmd, unsigned long arg)
1236{
1237	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1238
1239	if (dc->io_disable)
1240		return -EIO;
1241
1242	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
1243}
1244
1245static int cached_dev_congested(void *data, int bits)
1246{
1247	struct bcache_device *d = data;
1248	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1249	struct request_queue *q = bdev_get_queue(dc->bdev);
1250	int ret = 0;
1251
1252	if (bdi_congested(q->backing_dev_info, bits))
1253		return 1;
1254
1255	if (cached_dev_get(dc)) {
1256		unsigned int i;
1257		struct cache *ca;
1258
1259		for_each_cache(ca, d->c, i) {
1260			q = bdev_get_queue(ca->bdev);
1261			ret |= bdi_congested(q->backing_dev_info, bits);
1262		}
1263
1264		cached_dev_put(dc);
1265	}
1266
1267	return ret;
1268}
1269
1270void bch_cached_dev_request_init(struct cached_dev *dc)
1271{
1272	struct gendisk *g = dc->disk.disk;
1273
1274	g->queue->make_request_fn		= cached_dev_make_request;
1275	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
1276	dc->disk.cache_miss			= cached_dev_cache_miss;
1277	dc->disk.ioctl				= cached_dev_ioctl;
1278}
1279
1280/* Flash backed devices */
1281
1282static int flash_dev_cache_miss(struct btree *b, struct search *s,
1283				struct bio *bio, unsigned int sectors)
1284{
1285	unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
1286
1287	swap(bio->bi_iter.bi_size, bytes);
1288	zero_fill_bio(bio);
1289	swap(bio->bi_iter.bi_size, bytes);
1290
1291	bio_advance(bio, bytes);
1292
1293	if (!bio->bi_iter.bi_size)
1294		return MAP_DONE;
1295
1296	return MAP_CONTINUE;
1297}
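
/*
 * Editor's note (illustrative addition): a userspace sketch of the zero-fill
 * trick above. The size field is temporarily narrowed so only the part of
 * the request that a flash-only volume "misses" gets zeroed, then the
 * request is advanced past it. Plain buffers stand in for the bio; the
 * struct and function names are invented.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_bio {
	uint8_t	*data;		/* current position in the payload */
	size_t	 size;		/* bytes remaining in the request */
};

static int fake_cache_miss(struct fake_bio *bio, size_t miss_bytes)
{
	size_t bytes = miss_bytes < bio->size ? miss_bytes : bio->size;

	memset(bio->data, 0, bytes);	/* zero only the missed range */
	bio->data += bytes;		/* ... then advance past it */
	bio->size -= bytes;

	return bio->size == 0;		/* 1 ~ MAP_DONE, 0 ~ MAP_CONTINUE */
}

int main(void)
{
	uint8_t buf[8];
	struct fake_bio bio = { buf, sizeof(buf) };

	memset(buf, 0xff, sizeof(buf));
	printf("done=%d first=%#x last=%#x\n",
	       fake_cache_miss(&bio, 4), buf[0], buf[7]);
	return 0;
}
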
1298
1299static void flash_dev_nodata(struct closure *cl)
1300{
1301	struct search *s = container_of(cl, struct search, cl);
1302
1303	if (s->iop.flush_journal)
1304		bch_journal_meta(s->iop.c, cl);
1305
1306	continue_at(cl, search_free, NULL);
1307}
1308
1309static blk_qc_t flash_dev_make_request(struct request_queue *q,
1310					     struct bio *bio)
1311{
1312	struct search *s;
1313	struct closure *cl;
 1314	struct bcache_device *d = bio->bi_disk->private_data;
1315
1316	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
1317		bio->bi_status = BLK_STS_IOERR;
1318		bio_endio(bio);
1319		return BLK_QC_T_NONE;
1320	}
1321
1322	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
1323
1324	s = search_alloc(bio, d);
1325	cl = &s->cl;
1326	bio = &s->bio.bio;
1327
1328	trace_bcache_request_start(s->d, bio);
1329
1330	if (!bio->bi_iter.bi_size) {
1331		/*
1332		 * can't call bch_journal_meta from under
1333		 * generic_make_request
1334		 */
1335		continue_at_nobarrier(&s->cl,
1336				      flash_dev_nodata,
1337				      bcache_wq);
1338		return BLK_QC_T_NONE;
1339	} else if (bio_data_dir(bio)) {
1340		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
1341					&KEY(d->id, bio->bi_iter.bi_sector, 0),
1342					&KEY(d->id, bio_end_sector(bio), 0));
1343
1344		s->iop.bypass		= (bio_op(bio) == REQ_OP_DISCARD) != 0;
1345		s->iop.writeback	= true;
1346		s->iop.bio		= bio;
1347
1348		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1349	} else {
1350		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
1351	}
1352
1353	continue_at(cl, search_free, NULL);
1354	return BLK_QC_T_NONE;
1355}
1356
1357static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
1358			   unsigned int cmd, unsigned long arg)
1359{
1360	return -ENOTTY;
1361}
1362
1363static int flash_dev_congested(void *data, int bits)
1364{
1365	struct bcache_device *d = data;
1366	struct request_queue *q;
1367	struct cache *ca;
1368	unsigned int i;
1369	int ret = 0;
1370
1371	for_each_cache(ca, d->c, i) {
1372		q = bdev_get_queue(ca->bdev);
1373		ret |= bdi_congested(q->backing_dev_info, bits);
1374	}
1375
1376	return ret;
1377}
1378
1379void bch_flash_dev_request_init(struct bcache_device *d)
1380{
1381	struct gendisk *g = d->disk;
1382
1383	g->queue->make_request_fn		= flash_dev_make_request;
1384	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
1385	d->cache_miss				= flash_dev_cache_miss;
1386	d->ioctl				= flash_dev_ioctl;
1387}
1388
1389void bch_request_exit(void)
1390{
 1391	kmem_cache_destroy(bch_search_cache);
1392}
1393
1394int __init bch_request_init(void)
1395{
1396	bch_search_cache = KMEM_CACHE(search, 0);
1397	if (!bch_search_cache)
1398		return -ENOMEM;
1399
1400	return 0;
1401}
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Main bcache entry point - handle a read or a write request and decide what to
   4 * do with it; the make_request functions are called by the block layer.
   5 *
   6 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
   7 * Copyright 2012 Google, Inc.
   8 */
   9
  10#include "bcache.h"
  11#include "btree.h"
  12#include "debug.h"
  13#include "request.h"
  14#include "writeback.h"
  15
  16#include <linux/module.h>
  17#include <linux/hash.h>
  18#include <linux/random.h>
  19#include <linux/backing-dev.h>
  20
  21#include <trace/events/bcache.h>
  22
  23#define CUTOFF_CACHE_ADD	95
  24#define CUTOFF_CACHE_READA	90
  25
  26struct kmem_cache *bch_search_cache;
  27
  28static void bch_data_insert_start(struct closure *);
  29
  30static unsigned cache_mode(struct cached_dev *dc)
  31{
  32	return BDEV_CACHE_MODE(&dc->sb);
  33}
  34
  35static bool verify(struct cached_dev *dc)
  36{
  37	return dc->verify;
  38}
  39
  40static void bio_csum(struct bio *bio, struct bkey *k)
  41{
  42	struct bio_vec bv;
  43	struct bvec_iter iter;
  44	uint64_t csum = 0;
  45
  46	bio_for_each_segment(bv, bio, iter) {
  47		void *d = kmap(bv.bv_page) + bv.bv_offset;
 
  48		csum = bch_crc64_update(csum, d, bv.bv_len);
  49		kunmap(bv.bv_page);
  50	}
  51
  52	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
  53}
  54
  55/* Insert data into cache */
  56
  57static void bch_data_insert_keys(struct closure *cl)
  58{
  59	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
  60	atomic_t *journal_ref = NULL;
  61	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
  62	int ret;
  63
  64	/*
  65	 * If we're looping, might already be waiting on
  66	 * another journal write - can't wait on more than one journal write at
  67	 * a time
  68	 *
  69	 * XXX: this looks wrong
  70	 */
  71#if 0
  72	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
  73		closure_sync(&s->cl);
  74#endif
  75
  76	if (!op->replace)
  77		journal_ref = bch_journal(op->c, &op->insert_keys,
  78					  op->flush_journal ? cl : NULL);
  79
  80	ret = bch_btree_insert(op->c, &op->insert_keys,
  81			       journal_ref, replace_key);
  82	if (ret == -ESRCH) {
  83		op->replace_collision = true;
  84	} else if (ret) {
  85		op->status		= BLK_STS_RESOURCE;
  86		op->insert_data_done	= true;
  87	}
  88
  89	if (journal_ref)
  90		atomic_dec_bug(journal_ref);
  91
  92	if (!op->insert_data_done) {
  93		continue_at(cl, bch_data_insert_start, op->wq);
  94		return;
  95	}
  96
  97	bch_keylist_free(&op->insert_keys);
  98	closure_return(cl);
  99}
 100
 101static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
 102			       struct cache_set *c)
 103{
 104	size_t oldsize = bch_keylist_nkeys(l);
 105	size_t newsize = oldsize + u64s;
 106
 107	/*
 108	 * The journalling code doesn't handle the case where the keys to insert
 109	 * is bigger than an empty write: If we just return -ENOMEM here,
 110	 * bio_insert() and bio_invalidate() will insert the keys created so far
 111	 * and finish the rest when the keylist is empty.
 112	 */
 113	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
 114		return -ENOMEM;
 115
 116	return __bch_keylist_realloc(l, u64s);
 117}
 118
 119static void bch_data_invalidate(struct closure *cl)
 120{
 121	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 122	struct bio *bio = op->bio;
 123
 124	pr_debug("invalidating %i sectors from %llu",
 125		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 126
 127	while (bio_sectors(bio)) {
 128		unsigned sectors = min(bio_sectors(bio),
 129				       1U << (KEY_SIZE_BITS - 1));
 130
 131		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
 132			goto out;
 133
 134		bio->bi_iter.bi_sector	+= sectors;
 135		bio->bi_iter.bi_size	-= sectors << 9;
 136
 137		bch_keylist_add(&op->insert_keys,
 138				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
 
 
 139	}
 140
 141	op->insert_data_done = true;
 142	/* get in bch_data_insert() */
 143	bio_put(bio);
 144out:
 145	continue_at(cl, bch_data_insert_keys, op->wq);
 146}
 147
 148static void bch_data_insert_error(struct closure *cl)
 149{
 150	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 151
 152	/*
 153	 * Our data write just errored, which means we've got a bunch of keys to
 154	 * insert that point to data that wasn't succesfully written.
 155	 *
 156	 * We don't have to insert those keys but we still have to invalidate
 157	 * that region of the cache - so, if we just strip off all the pointers
 158	 * from the keys we'll accomplish just that.
 159	 */
 160
 161	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
 162
 163	while (src != op->insert_keys.top) {
 164		struct bkey *n = bkey_next(src);
 165
 166		SET_KEY_PTRS(src, 0);
 167		memmove(dst, src, bkey_bytes(src));
 168
 169		dst = bkey_next(dst);
 170		src = n;
 171	}
 172
 173	op->insert_keys.top = dst;
 174
 175	bch_data_insert_keys(cl);
 176}
 177
 178static void bch_data_insert_endio(struct bio *bio)
 179{
 180	struct closure *cl = bio->bi_private;
 181	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 182
 183	if (bio->bi_status) {
 184		/* TODO: We could try to recover from this. */
 185		if (op->writeback)
 186			op->status = bio->bi_status;
 187		else if (!op->replace)
 188			set_closure_fn(cl, bch_data_insert_error, op->wq);
 189		else
 190			set_closure_fn(cl, NULL, NULL);
 191	}
 192
 193	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
 194}
 195
 196static void bch_data_insert_start(struct closure *cl)
 197{
 198	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 199	struct bio *bio = op->bio, *n;
 200
 201	if (op->bypass)
 202		return bch_data_invalidate(cl);
 203
 204	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
 205		wake_up_gc(op->c);
 206
 207	/*
 208	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
 209	 * flush, it'll wait on the journal write.
 210	 */
 211	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
 212
 213	do {
 214		unsigned i;
 215		struct bkey *k;
 216		struct bio_set *split = op->c->bio_split;
 217
 218		/* 1 for the device pointer and 1 for the chksum */
 219		if (bch_keylist_realloc(&op->insert_keys,
 220					3 + (op->csum ? 1 : 0),
 221					op->c)) {
 222			continue_at(cl, bch_data_insert_keys, op->wq);
 223			return;
 224		}
 225
 226		k = op->insert_keys.top;
 227		bkey_init(k);
 228		SET_KEY_INODE(k, op->inode);
 229		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
 230
 231		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
 232				       op->write_point, op->write_prio,
 233				       op->writeback))
 234			goto err;
 235
 236		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 237
 238		n->bi_end_io	= bch_data_insert_endio;
 239		n->bi_private	= cl;
 240
 241		if (op->writeback) {
 242			SET_KEY_DIRTY(k, true);
 243
 244			for (i = 0; i < KEY_PTRS(k); i++)
 245				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
 246					    GC_MARK_DIRTY);
 247		}
 248
 249		SET_KEY_CSUM(k, op->csum);
 250		if (KEY_CSUM(k))
 251			bio_csum(n, k);
 252
 253		trace_bcache_cache_insert(k);
 254		bch_keylist_push(&op->insert_keys);
 255
 256		bio_set_op_attrs(n, REQ_OP_WRITE, 0);
 257		bch_submit_bbio(n, op->c, k, 0);
 258	} while (n != bio);
 259
 260	op->insert_data_done = true;
 261	continue_at(cl, bch_data_insert_keys, op->wq);
 262	return;
 263err:
 264	/* bch_alloc_sectors() blocks if s->writeback = true */
 265	BUG_ON(op->writeback);
 266
 267	/*
 268	 * But if it's not a writeback write we'd rather just bail out if
 269	 * there aren't any buckets ready to write to - it might take awhile and
 270	 * we might be starving btree writes for gc or something.
 271	 */
 272
 273	if (!op->replace) {
 274		/*
 275		 * Writethrough write: We can't complete the write until we've
 276		 * updated the index. But we don't want to delay the write while
 277		 * we wait for buckets to be freed up, so just invalidate the
 278		 * rest of the write.
 279		 */
 280		op->bypass = true;
 281		return bch_data_invalidate(cl);
 282	} else {
 283		/*
 284		 * From a cache miss, we can just insert the keys for the data
 285		 * we have written or bail out if we didn't do anything.
 286		 */
 287		op->insert_data_done = true;
 288		bio_put(bio);
 289
 290		if (!bch_keylist_empty(&op->insert_keys))
 291			continue_at(cl, bch_data_insert_keys, op->wq);
 292		else
 293			closure_return(cl);
 294	}
 295}
 296
 297/**
 298 * bch_data_insert - stick some data in the cache
 299 * @cl: closure pointer.
 300 *
 301 * This is the starting point for any data to end up in a cache device; it could
 302 * be from a normal write, or a writeback write, or a write to a flash only
 303 * volume - it's also used by the moving garbage collector to compact data in
 304 * mostly empty buckets.
 305 *
 306 * It first writes the data to the cache, creating a list of keys to be inserted
 307 * (if the data had to be fragmented there will be multiple keys); after the
 308 * data is written it calls bch_journal, and after the keys have been added to
 309 * the next journal write they're inserted into the btree.
 310 *
 311 * It inserts the data in s->cache_bio; bi_sector is used for the key offset,
 312 * and op->inode is used for the key inode.
 313 *
 314 * If s->bypass is true, instead of inserting the data it invalidates the
 315 * region of the cache represented by s->cache_bio and op->inode.
 316 */
 317void bch_data_insert(struct closure *cl)
 318{
 319	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 320
 321	trace_bcache_write(op->c, op->inode, op->bio,
 322			   op->writeback, op->bypass);
 323
 324	bch_keylist_init(&op->insert_keys);
 325	bio_get(op->bio);
 326	bch_data_insert_start(cl);
 327}
 328
 329/* Congested? */
 330
 331unsigned bch_get_congested(struct cache_set *c)
 
 
 332{
 333	int i;
 334	long rand;
 335
 336	if (!c->congested_read_threshold_us &&
 337	    !c->congested_write_threshold_us)
 338		return 0;
 339
 340	i = (local_clock_us() - c->congested_last_us) / 1024;
 341	if (i < 0)
 342		return 0;
 343
 344	i += atomic_read(&c->congested);
 345	if (i >= 0)
 346		return 0;
 347
 348	i += CONGESTED_MAX;
 349
 350	if (i > 0)
 351		i = fract_exp_two(i, 6);
 352
 353	rand = get_random_int();
 354	i -= bitmap_weight(&rand, BITS_PER_LONG);
 355
 356	return i > 0 ? i : 1;
 357}
 358
 359static void add_sequential(struct task_struct *t)
 360{
 361	ewma_add(t->sequential_io_avg,
 362		 t->sequential_io, 8, 0);
 363
 364	t->sequential_io = 0;
 365}
 366
 367static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
 368{
 369	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
 370}
 371
 372static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 373{
 374	struct cache_set *c = dc->disk.c;
 375	unsigned mode = cache_mode(dc);
 376	unsigned sectors, congested = bch_get_congested(c);
 377	struct task_struct *task = current;
 378	struct io *i;
 379
 380	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
 381	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
 382	    (bio_op(bio) == REQ_OP_DISCARD))
 383		goto skip;
 384
 385	if (mode == CACHE_MODE_NONE ||
 386	    (mode == CACHE_MODE_WRITEAROUND &&
 387	     op_is_write(bio_op(bio))))
 388		goto skip;
 389
 390	/*
 391	 * Flag for bypass if the IO is for read-ahead or background,
 392	 * unless the read-ahead request is for metadata (eg, for gfs2).
 
 393	 */
 394	if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
 395	    !(bio->bi_opf & REQ_META))
 396		goto skip;
 397
 398	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
 399	    bio_sectors(bio) & (c->sb.block_size - 1)) {
 400		pr_debug("skipping unaligned io");
 401		goto skip;
 402	}
 403
 404	if (bypass_torture_test(dc)) {
 405		if ((get_random_int() & 3) == 3)
 406			goto skip;
 407		else
 408			goto rescale;
 409	}
 410
 
 411	if (!congested && !dc->sequential_cutoff)
 412		goto rescale;
 413
 414	spin_lock(&dc->io_lock);
 415
 416	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
 417		if (i->last == bio->bi_iter.bi_sector &&
 418		    time_before(jiffies, i->jiffies))
 419			goto found;
 420
 421	i = list_first_entry(&dc->io_lru, struct io, lru);
 422
 423	add_sequential(task);
 424	i->sequential = 0;
 425found:
 426	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
 427		i->sequential	+= bio->bi_iter.bi_size;
 428
 429	i->last			 = bio_end_sector(bio);
 430	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
 431	task->sequential_io	 = i->sequential;
 432
 433	hlist_del(&i->hash);
 434	hlist_add_head(&i->hash, iohash(dc, i->last));
 435	list_move_tail(&i->lru, &dc->io_lru);
 436
 437	spin_unlock(&dc->io_lock);
 438
 439	sectors = max(task->sequential_io,
 440		      task->sequential_io_avg) >> 9;
 441
 442	if (dc->sequential_cutoff &&
 443	    sectors >= dc->sequential_cutoff >> 9) {
 444		trace_bcache_bypass_sequential(bio);
 445		goto skip;
 446	}
 447
 448	if (congested && sectors >= congested) {
 449		trace_bcache_bypass_congested(bio);
 450		goto skip;
 451	}
 452
 453rescale:
 454	bch_rescale_priorities(c, bio_sectors(bio));
 455	return false;
 456skip:
 457	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
 458	return true;
 459}
 460
 461/* Cache lookup */
 462
 463struct search {
 464	/* Stack frame for bio_complete */
 465	struct closure		cl;
 466
 467	struct bbio		bio;
 468	struct bio		*orig_bio;
 469	struct bio		*cache_miss;
 470	struct bcache_device	*d;
 471
 472	unsigned		insert_bio_sectors;
 473	unsigned		recoverable:1;
 474	unsigned		write:1;
 475	unsigned		read_dirty_data:1;
 476	unsigned		cache_missed:1;
 477
 478	unsigned long		start_time;
 479
 480	struct btree_op		op;
 481	struct data_insert_op	iop;
 482};
 483
 484static void bch_cache_read_endio(struct bio *bio)
 485{
 486	struct bbio *b = container_of(bio, struct bbio, bio);
 487	struct closure *cl = bio->bi_private;
 488	struct search *s = container_of(cl, struct search, cl);
 489
 490	/*
 491	 * If the bucket was reused while our bio was in flight, we might have
 492	 * read the wrong data. Set s->error but not error so it doesn't get
 493	 * counted against the cache device, but we'll still reread the data
 494	 * from the backing device.
 495	 */
 496
 497	if (bio->bi_status)
 498		s->iop.status = bio->bi_status;
 499	else if (!KEY_DIRTY(&b->key) &&
 500		 ptr_stale(s->iop.c, &b->key, 0)) {
 501		atomic_long_inc(&s->iop.c->cache_read_races);
 502		s->iop.status = BLK_STS_IOERR;
 503	}
 504
 505	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
 506}
 507
 508/*
 509 * Read from a single key, handling the initial cache miss if the key starts in
 510 * the middle of the bio
 511 */
 512static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 513{
 514	struct search *s = container_of(op, struct search, op);
 515	struct bio *n, *bio = &s->bio.bio;
 516	struct bkey *bio_key;
 517	unsigned ptr;
 518
 519	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
 520		return MAP_CONTINUE;
 521
 522	if (KEY_INODE(k) != s->iop.inode ||
 523	    KEY_START(k) > bio->bi_iter.bi_sector) {
 524		unsigned bio_sectors = bio_sectors(bio);
 525		unsigned sectors = KEY_INODE(k) == s->iop.inode
 526			? min_t(uint64_t, INT_MAX,
 527				KEY_START(k) - bio->bi_iter.bi_sector)
 528			: INT_MAX;
 529
 530		int ret = s->d->cache_miss(b, s, bio, sectors);
 531		if (ret != MAP_CONTINUE)
 532			return ret;
 533
 534		/* if this was a complete miss we shouldn't get here */
 535		BUG_ON(bio_sectors <= sectors);
 536	}
 537
 538	if (!KEY_SIZE(k))
 539		return MAP_CONTINUE;
 540
 541	/* XXX: figure out best pointer - for multiple cache devices */
 542	ptr = 0;
 543
 544	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
 545
 546	if (KEY_DIRTY(k))
 547		s->read_dirty_data = true;
 548
 549	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
 550				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
 551			   GFP_NOIO, s->d->bio_split);
 552
 553	bio_key = &container_of(n, struct bbio, bio)->key;
 554	bch_bkey_copy_single_ptr(bio_key, k, ptr);
 555
 556	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
 557	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
 558
 559	n->bi_end_io	= bch_cache_read_endio;
 560	n->bi_private	= &s->cl;
 561
 562	/*
 563	 * The bucket we're reading from might be reused while our bio
 564	 * is in flight, and we could then end up reading the wrong
 565	 * data.
 566	 *
 567	 * We guard against this by checking (in cache_read_endio()) if
 568	 * the pointer is stale again; if so, we treat it as an error
 569	 * and reread from the backing device (but we don't pass that
 570	 * error up anywhere).
 571	 */
 572
 573	__bch_submit_bbio(n, b->c);
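	/*
	 * If bio_next_split() handed back the original bio, this key covered
	 * everything that was left and the lookup is done; otherwise keep
	 * walking the btree for the remaining sectors.
	 */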
 574	return n == bio ? MAP_DONE : MAP_CONTINUE;
 575}
 576
 577static void cache_lookup(struct closure *cl)
 578{
 579	struct search *s = container_of(cl, struct search, iop.cl);
 580	struct bio *bio = &s->bio.bio;
 581	struct cached_dev *dc;
 582	int ret;
 583
 584	bch_btree_op_init(&s->op, -1);
 585
 586	ret = bch_btree_map_keys(&s->op, s->iop.c,
 587				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
 588				 cache_lookup_fn, MAP_END_KEY);
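	/*
	 * -EAGAIN means the btree walk could not complete yet; punt the
	 * lookup to bcache_wq and retry it from the start later.
	 */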
 589	if (ret == -EAGAIN) {
 590		continue_at(cl, cache_lookup, bcache_wq);
 591		return;
 592	}
 593
 594	/*
  595	 * We might hit an error while searching the btree; if that happens,
  596	 * ret is negative. In that scenario we should not recover data from
  597	 * the backing device (when the cache device is dirty), because we
  598	 * don't know whether all the bkeys the read request covers are clean.
  599	 *
  600	 * Also note that in this case s->iop.status still has its initial
  601	 * value from before we submitted s->bio.bio.
 602	 */
 603	if (ret < 0) {
 604		BUG_ON(ret == -EINTR);
 605		if (s->d && s->d->c &&
 606				!UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
 607			dc = container_of(s->d, struct cached_dev, disk);
 608			if (dc && atomic_read(&dc->has_dirty))
 609				s->recoverable = false;
 610		}
 611		if (!s->iop.status)
 612			s->iop.status = BLK_STS_IOERR;
 613	}
 614
 615	closure_return(cl);
 616}
 617
 618/* Common code for the make_request functions */
 619
 620static void request_endio(struct bio *bio)
 621{
 622	struct closure *cl = bio->bi_private;
 623
 624	if (bio->bi_status) {
 625		struct search *s = container_of(cl, struct search, cl);
 626		s->iop.status = bio->bi_status;
 627		/* Only cache read errors are recoverable */
 628		s->recoverable = false;
 629	}
 630
 631	bio_put(bio);
 632	closure_put(cl);
 633}
 634
 635static void backing_request_endio(struct bio *bio)
 636{
 637	struct closure *cl = bio->bi_private;
 638
 639	if (bio->bi_status) {
 640		struct search *s = container_of(cl, struct search, cl);
 641		struct cached_dev *dc = container_of(s->d,
 642						     struct cached_dev, disk);
 643		/*
  644		 * If a bio has REQ_PREFLUSH for writeback mode, it is
  645		 * specially assembled in cached_dev_write() for a non-empty
  646		 * write request which has REQ_PREFLUSH. We don't set
  647		 * s->iop.status for this failure; the status will be decided
  648		 * by the result of the bch_data_insert() operation.
 649		 */
 650		if (unlikely(s->iop.writeback &&
 651			     bio->bi_opf & REQ_PREFLUSH)) {
 652			pr_err("Can't flush %s: returned bi_status %i",
 653				dc->backing_dev_name, bio->bi_status);
 654		} else {
 655			/* set to orig_bio->bi_status in bio_complete() */
 656			s->iop.status = bio->bi_status;
 657		}
 658		s->recoverable = false;
 659		/* should count I/O error for backing device here */
 660		bch_count_backing_io_errors(dc, bio);
 661	}
 662
 663	bio_put(bio);
 664	closure_put(cl);
 665}
 666
 667static void bio_complete(struct search *s)
 668{
 669	if (s->orig_bio) {
 670		generic_end_io_acct(s->d->disk->queue,
 671				    bio_data_dir(s->orig_bio),
 672				    &s->d->disk->part0, s->start_time);
 673
 674		trace_bcache_request_end(s->d, s->orig_bio);
 675		s->orig_bio->bi_status = s->iop.status;
 676		bio_endio(s->orig_bio);
 677		s->orig_bio = NULL;
 678	}
 679}
 680
 681static void do_bio_hook(struct search *s,
 682			struct bio *orig_bio,
 683			bio_end_io_t *end_io_fn)
 684{
 685	struct bio *bio = &s->bio.bio;
 686
 687	bio_init(bio, NULL, 0);
 688	__bio_clone_fast(bio, orig_bio);
 689	/*
  690	 * bi_end_io can be set separately somewhere else, e.g. by
  691	 * the callers that override it:
 692	 * - cache_bio->bi_end_io from cached_dev_cache_miss()
 693	 * - n->bi_end_io from cache_lookup_fn()
 694	 */
 695	bio->bi_end_io		= end_io_fn;
 696	bio->bi_private		= &s->cl;
 697
 698	bio_cnt_set(bio, 3);
 699}
 700
 701static void search_free(struct closure *cl)
 702{
 703	struct search *s = container_of(cl, struct search, cl);
 704
 705	if (s->iop.bio)
 706		bio_put(s->iop.bio);
 707
 708	bio_complete(s);
 709	closure_debug_destroy(cl);
 710	mempool_free(s, s->d->c->search);
 711}
 712
 713static inline struct search *search_alloc(struct bio *bio,
 714					  struct bcache_device *d)
 715{
 716	struct search *s;
 717
 718	s = mempool_alloc(d->c->search, GFP_NOIO);
 719
 720	closure_init(&s->cl, NULL);
 721	do_bio_hook(s, bio, request_endio);
 722
 723	s->orig_bio		= bio;
 724	s->cache_miss		= NULL;
 725	s->cache_missed		= 0;
 726	s->d			= d;
 727	s->recoverable		= 1;
 728	s->write		= op_is_write(bio_op(bio));
 729	s->read_dirty_data	= 0;
 730	s->start_time		= jiffies;
 731
 732	s->iop.c		= d->c;
 733	s->iop.bio		= NULL;
 734	s->iop.inode		= d->id;
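	/*
	 * The write point below hashes the submitting task so that data
	 * written by different tasks tends to go to different open buckets.
	 */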
 735	s->iop.write_point	= hash_long((unsigned long) current, 16);
 736	s->iop.write_prio	= 0;
 737	s->iop.status		= 0;
 738	s->iop.flags		= 0;
 739	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
 740	s->iop.wq		= bcache_wq;
 741
 742	return s;
 743}
 744
 745/* Cached devices */
 746
 747static void cached_dev_bio_complete(struct closure *cl)
 748{
 749	struct search *s = container_of(cl, struct search, cl);
 750	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 751
 752	search_free(cl);
 753	cached_dev_put(dc);
 754}
 755
 756/* Process reads */
 757
 758static void cached_dev_cache_miss_done(struct closure *cl)
 759{
 760	struct search *s = container_of(cl, struct search, cl);
 761
 762	if (s->iop.replace_collision)
 763		bch_mark_cache_miss_collision(s->iop.c, s->d);
 764
 765	if (s->iop.bio)
 766		bio_free_pages(s->iop.bio);
 767
 768	cached_dev_bio_complete(cl);
 769}
 770
 771static void cached_dev_read_error(struct closure *cl)
 772{
 773	struct search *s = container_of(cl, struct search, cl);
 774	struct bio *bio = &s->bio.bio;
 775
 776	/*
  777	 * If the read request hit dirty data (s->read_dirty_data is true),
  778	 * then recovering a failed read from the cached device may return
  779	 * stale data. So read failure recovery is only permitted when the
  780	 * read request hit clean data in the cache device, or when a cache
  781	 * read race happened.
 782	 */
 783	if (s->recoverable && !s->read_dirty_data) {
 784		/* Retry from the backing device: */
 785		trace_bcache_read_retry(s->orig_bio);
 786
 787		s->iop.status = 0;
 788		do_bio_hook(s, s->orig_bio, backing_request_endio);
 789
 790		/* XXX: invalidate cache */
 791
 792		/* I/O request sent to backing device */
 793		closure_bio_submit(s->iop.c, bio, cl);
 794	}
 795
 796	continue_at(cl, cached_dev_cache_miss_done, NULL);
 797}
 798
 799static void cached_dev_read_done(struct closure *cl)
 800{
 801	struct search *s = container_of(cl, struct search, cl);
 802	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 803
 804	/*
 805	 * We had a cache miss; cache_bio now contains data ready to be inserted
 806	 * into the cache.
 807	 *
 808	 * First, we copy the data we just read from cache_bio's bounce buffers
 809	 * to the buffers the original bio pointed to:
 810	 */
 811
 812	if (s->iop.bio) {
 813		bio_reset(s->iop.bio);
 814		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
 815		bio_copy_dev(s->iop.bio, s->cache_miss);
 816		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 817		bch_bio_map(s->iop.bio, NULL);
 818
 819		bio_copy_data(s->cache_miss, s->iop.bio);
 820
 821		bio_put(s->cache_miss);
 822		s->cache_miss = NULL;
 823	}
 824
 825	if (verify(dc) && s->recoverable && !s->read_dirty_data)
 826		bch_data_verify(dc, s->orig_bio);
 827
 828	bio_complete(s);
 829
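	/*
	 * The original request is complete; now insert the data we read
	 * from the backing device into the cache, unless the cache set is
	 * being stopped.
	 */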
 830	if (s->iop.bio &&
 831	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
 832		BUG_ON(!s->iop.replace);
 833		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
 834	}
 835
 836	continue_at(cl, cached_dev_cache_miss_done, NULL);
 837}
 838
 839static void cached_dev_read_done_bh(struct closure *cl)
 840{
 841	struct search *s = container_of(cl, struct search, cl);
 842	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 843
 844	bch_mark_cache_accounting(s->iop.c, s->d,
 845				  !s->cache_missed, s->iop.bypass);
 846	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
 847
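	/*
	 * Dispatch: on error try to recover from the backing device; if
	 * there is cache-miss data to insert (or verify is enabled), take
	 * the slow path; otherwise just complete the request.
	 */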
 848	if (s->iop.status)
 849		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
 850	else if (s->iop.bio || verify(dc))
 851		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
 852	else
 853		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
 854}
 855
 856static int cached_dev_cache_miss(struct btree *b, struct search *s,
 857				 struct bio *bio, unsigned sectors)
 858{
 859	int ret = MAP_CONTINUE;
 860	unsigned reada = 0;
 861	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 862	struct bio *miss, *cache_bio;
 863
 864	s->cache_missed = 1;
 865
 866	if (s->cache_miss || s->iop.bypass) {
 867		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 868		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
 869		goto out_submit;
 870	}
 871
 872	if (!(bio->bi_opf & REQ_RAHEAD) &&
 873	    !(bio->bi_opf & REQ_META) &&
 874	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
 875		reada = min_t(sector_t, dc->readahead >> 9,
 876			      get_capacity(bio->bi_disk) - bio_end_sector(bio));
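	/*
	 * reada is the number of extra sectors, beyond the miss itself, to
	 * read from the backing device and insert into the cache, bounded
	 * by dc->readahead and the end of the backing device.
	 */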
 877
 878	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 879
 880	s->iop.replace_key = KEY(s->iop.inode,
 881				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
 882				 s->insert_bio_sectors);
 883
 884	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
 885	if (ret)
 886		return ret;
 887
 888	s->iop.replace = true;
 889
 890	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 891
 892	/* btree_search_recurse()'s btree iterator is no good anymore */
 893	ret = miss == bio ? MAP_DONE : -EINTR;
 894
 895	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
 896			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
 897			dc->disk.bio_split);
 898	if (!cache_bio)
 899		goto out_submit;
 900
 901	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
 902	bio_copy_dev(cache_bio, miss);
 903	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;
 904
 905	cache_bio->bi_end_io	= backing_request_endio;
 906	cache_bio->bi_private	= &s->cl;
 907
 908	bch_bio_map(cache_bio, NULL);
 909	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
 910		goto out_put;
 911
 912	if (reada)
 913		bch_mark_cache_readahead(s->iop.c, s->d);
 914
 915	s->cache_miss	= miss;
 916	s->iop.bio	= cache_bio;
 917	bio_get(cache_bio);
 918	/* I/O request sent to backing device */
 919	closure_bio_submit(s->iop.c, cache_bio, &s->cl);
 920
 921	return ret;
 922out_put:
 923	bio_put(cache_bio);
 924out_submit:
 925	miss->bi_end_io		= backing_request_endio;
 926	miss->bi_private	= &s->cl;
 927	/* I/O request sent to backing device */
 928	closure_bio_submit(s->iop.c, miss, &s->cl);
 929	return ret;
 930}
 931
 932static void cached_dev_read(struct cached_dev *dc, struct search *s)
 933{
 934	struct closure *cl = &s->cl;
 935
 936	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
 937	continue_at(cl, cached_dev_read_done_bh, NULL);
 938}
 939
 940/* Process writes */
 941
 942static void cached_dev_write_complete(struct closure *cl)
 943{
 944	struct search *s = container_of(cl, struct search, cl);
 945	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 946
 947	up_read_non_owner(&dc->writeback_lock);
 948	cached_dev_bio_complete(cl);
 949}
 950
 951static void cached_dev_write(struct cached_dev *dc, struct search *s)
 952{
 953	struct closure *cl = &s->cl;
 954	struct bio *bio = &s->bio.bio;
 955	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
 956	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 957
 958	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
 959
 960	down_read_non_owner(&dc->writeback_lock);
 961	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
 962		/*
 963		 * We overlap with some dirty data undergoing background
 964		 * writeback, force this write to writeback
 965		 */
 966		s->iop.bypass = false;
 967		s->iop.writeback = true;
 968	}
 969
 970	/*
 971	 * Discards aren't _required_ to do anything, so skipping if
 972	 * check_overlapping returned true is ok
 973	 *
 974	 * But check_overlapping drops dirty keys for which io hasn't started,
 975	 * so we still want to call it.
 976	 */
 977	if (bio_op(bio) == REQ_OP_DISCARD)
 978		s->iop.bypass = true;
 979
 980	if (should_writeback(dc, s->orig_bio,
 981			     cache_mode(dc),
 982			     s->iop.bypass)) {
 983		s->iop.bypass = false;
 984		s->iop.writeback = true;
 985	}
 986
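	/*
	 * Three cases from here: bypass (the write goes straight to the
	 * backing device and the cached range is only invalidated),
	 * writeback (the write goes to the cache as dirty data and is
	 * written back later), or writethrough (the write goes to the
	 * backing device and a clone is inserted into the cache).
	 */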
 987	if (s->iop.bypass) {
 988		s->iop.bio = s->orig_bio;
 989		bio_get(s->iop.bio);
 990
 991		if (bio_op(bio) == REQ_OP_DISCARD &&
 992		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
 993			goto insert_data;
 994
 995		/* I/O request sent to backing device */
 996		bio->bi_end_io = backing_request_endio;
 997		closure_bio_submit(s->iop.c, bio, cl);
 998
 999	} else if (s->iop.writeback) {
1000		bch_writeback_add(dc);
1001		s->iop.bio = bio;
1002
1003		if (bio->bi_opf & REQ_PREFLUSH) {
1004			/*
1005			 * Also need to send a flush to the backing
1006			 * device.
1007			 */
1008			struct bio *flush;
1009
1010			flush = bio_alloc_bioset(GFP_NOIO, 0,
1011						 dc->disk.bio_split);
1012			if (!flush) {
1013				s->iop.status = BLK_STS_RESOURCE;
1014				goto insert_data;
1015			}
1016			bio_copy_dev(flush, bio);
1017			flush->bi_end_io = backing_request_endio;
1018			flush->bi_private = cl;
1019			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
1020			/* I/O request sent to backing device */
1021			closure_bio_submit(s->iop.c, flush, cl);
1022		}
1023	} else {
1024		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
1025		/* I/O request sent to backing device */
1026		bio->bi_end_io = backing_request_endio;
1027		closure_bio_submit(s->iop.c, bio, cl);
1028	}
1029
1030insert_data:
1031	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1032	continue_at(cl, cached_dev_write_complete, NULL);
1033}
1034
1035static void cached_dev_nodata(struct closure *cl)
1036{
1037	struct search *s = container_of(cl, struct search, cl);
1038	struct bio *bio = &s->bio.bio;
1039
1040	if (s->iop.flush_journal)
1041		bch_journal_meta(s->iop.c, cl);
1042
1043	/* If it's a flush, we send the flush to the backing device too */
1044	bio->bi_end_io = backing_request_endio;
1045	closure_bio_submit(s->iop.c, bio, cl);
1046
1047	continue_at(cl, cached_dev_bio_complete, NULL);
1048}
1049
1050struct detached_dev_io_private {
1051	struct bcache_device	*d;
1052	unsigned long		start_time;
1053	bio_end_io_t		*bi_end_io;
1054	void			*bi_private;
1055};
1056
1057static void detached_dev_end_io(struct bio *bio)
1058{
1059	struct detached_dev_io_private *ddip;
1060
1061	ddip = bio->bi_private;
1062	bio->bi_end_io = ddip->bi_end_io;
1063	bio->bi_private = ddip->bi_private;
1064
1065	generic_end_io_acct(ddip->d->disk->queue,
1066			    bio_data_dir(bio),
1067			    &ddip->d->disk->part0, ddip->start_time);
1068
1069	if (bio->bi_status) {
1070		struct cached_dev *dc = container_of(ddip->d,
1071						     struct cached_dev, disk);
1072		/* should count I/O error for backing device here */
1073		bch_count_backing_io_errors(dc, bio);
1074	}
1075
1076	kfree(ddip);
1077	bio->bi_end_io(bio);
1078}
1079
1080static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
1081{
1082	struct detached_dev_io_private *ddip;
1083	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1084
1085	/*
 1086	 * No need to call closure_get(&dc->disk.cl) here,
 1087	 * because the upper layer has already opened the bcache device,
 1088	 * which did that closure_get(&dc->disk.cl) for us.
1089	 */
1090	ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
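	/* Note: this GFP_NOIO allocation is not checked for failure. */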
1091	ddip->d = d;
1092	ddip->start_time = jiffies;
1093	ddip->bi_end_io = bio->bi_end_io;
1094	ddip->bi_private = bio->bi_private;
1095	bio->bi_end_io = detached_dev_end_io;
1096	bio->bi_private = ddip;
1097
1098	if ((bio_op(bio) == REQ_OP_DISCARD) &&
1099	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
1100		bio->bi_end_io(bio);
1101	else
1102		generic_make_request(bio);
1103}
1104
1105/* Cached devices - read & write stuff */
1106
1107static blk_qc_t cached_dev_make_request(struct request_queue *q,
1108					struct bio *bio)
1109{
1110	struct search *s;
1111	struct bcache_device *d = bio->bi_disk->private_data;
1112	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1113	int rw = bio_data_dir(bio);
1114
1115	if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
1116		     dc->io_disable)) {
1117		bio->bi_status = BLK_STS_IOERR;
1118		bio_endio(bio);
1119		return BLK_QC_T_NONE;
1120	}
1121
1122	atomic_set(&dc->backing_idle, 0);
1123	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
1124
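	/*
	 * Remap the request onto the backing device: the backing data
	 * starts at sb.data_offset, past the bcache superblock at the
	 * front of the backing device.
	 */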
1125	bio_set_dev(bio, dc->bdev);
1126	bio->bi_iter.bi_sector += dc->sb.data_offset;
1127
1128	if (cached_dev_get(dc)) {
1129		s = search_alloc(bio, d);
1130		trace_bcache_request_start(s->d, bio);
1131
1132		if (!bio->bi_iter.bi_size) {
1133			/*
1134			 * can't call bch_journal_meta from under
1135			 * generic_make_request
1136			 */
1137			continue_at_nobarrier(&s->cl,
1138					      cached_dev_nodata,
1139					      bcache_wq);
1140		} else {
1141			s->iop.bypass = check_should_bypass(dc, bio);
1142
1143			if (rw)
1144				cached_dev_write(dc, s);
1145			else
1146				cached_dev_read(dc, s);
1147		}
1148	} else
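		/*
		 * The backing device is not (or no longer) attached to a
		 * cache set; run in pass-through mode.
		 */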
1149		/* I/O request sent to backing device */
1150		detached_dev_do_request(d, bio);
1151
1152	return BLK_QC_T_NONE;
1153}
1154
1155static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
1156			    unsigned int cmd, unsigned long arg)
1157{
1158	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1159	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
1160}
1161
1162static int cached_dev_congested(void *data, int bits)
1163{
1164	struct bcache_device *d = data;
1165	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1166	struct request_queue *q = bdev_get_queue(dc->bdev);
1167	int ret = 0;
1168
1169	if (bdi_congested(q->backing_dev_info, bits))
1170		return 1;
1171
1172	if (cached_dev_get(dc)) {
1173		unsigned i;
1174		struct cache *ca;
1175
1176		for_each_cache(ca, d->c, i) {
1177			q = bdev_get_queue(ca->bdev);
1178			ret |= bdi_congested(q->backing_dev_info, bits);
1179		}
1180
1181		cached_dev_put(dc);
1182	}
1183
1184	return ret;
1185}
1186
1187void bch_cached_dev_request_init(struct cached_dev *dc)
1188{
1189	struct gendisk *g = dc->disk.disk;
1190
1191	g->queue->make_request_fn		= cached_dev_make_request;
1192	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
1193	dc->disk.cache_miss			= cached_dev_cache_miss;
1194	dc->disk.ioctl				= cached_dev_ioctl;
1195}
1196
1197/* Flash backed devices */
1198
1199static int flash_dev_cache_miss(struct btree *b, struct search *s,
1200				struct bio *bio, unsigned sectors)
1201{
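	/*
	 * A cache miss on a flash-only volume means there is simply no
	 * data for this range: zero-fill that part of the bio and advance
	 * past it.
	 */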
1202	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;
1203
1204	swap(bio->bi_iter.bi_size, bytes);
1205	zero_fill_bio(bio);
1206	swap(bio->bi_iter.bi_size, bytes);
1207
1208	bio_advance(bio, bytes);
1209
1210	if (!bio->bi_iter.bi_size)
1211		return MAP_DONE;
1212
1213	return MAP_CONTINUE;
1214}
1215
1216static void flash_dev_nodata(struct closure *cl)
1217{
1218	struct search *s = container_of(cl, struct search, cl);
1219
1220	if (s->iop.flush_journal)
1221		bch_journal_meta(s->iop.c, cl);
1222
1223	continue_at(cl, search_free, NULL);
1224}
1225
1226static blk_qc_t flash_dev_make_request(struct request_queue *q,
1227					     struct bio *bio)
1228{
1229	struct search *s;
1230	struct closure *cl;
1231	struct bcache_device *d = bio->bi_disk->private_data;
1232	int rw = bio_data_dir(bio);
1233
1234	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
1235		bio->bi_status = BLK_STS_IOERR;
1236		bio_endio(bio);
1237		return BLK_QC_T_NONE;
1238	}
1239
1240	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
1241
1242	s = search_alloc(bio, d);
1243	cl = &s->cl;
1244	bio = &s->bio.bio;
1245
1246	trace_bcache_request_start(s->d, bio);
1247
1248	if (!bio->bi_iter.bi_size) {
1249		/*
1250		 * can't call bch_journal_meta from under
1251		 * generic_make_request
1252		 */
1253		continue_at_nobarrier(&s->cl,
1254				      flash_dev_nodata,
1255				      bcache_wq);
1256		return BLK_QC_T_NONE;
1257	} else if (rw) {
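		/*
		 * Writes to a flash-only volume go directly into the btree
		 * as dirty data; there is no backing device. Discards just
		 * invalidate the range (bypass).
		 */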
1258		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
1259					&KEY(d->id, bio->bi_iter.bi_sector, 0),
1260					&KEY(d->id, bio_end_sector(bio), 0));
1261
1262		s->iop.bypass		= (bio_op(bio) == REQ_OP_DISCARD) != 0;
1263		s->iop.writeback	= true;
1264		s->iop.bio		= bio;
1265
1266		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1267	} else {
1268		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
1269	}
1270
1271	continue_at(cl, search_free, NULL);
1272	return BLK_QC_T_NONE;
1273}
1274
1275static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
1276			   unsigned int cmd, unsigned long arg)
1277{
1278	return -ENOTTY;
1279}
1280
1281static int flash_dev_congested(void *data, int bits)
1282{
1283	struct bcache_device *d = data;
1284	struct request_queue *q;
1285	struct cache *ca;
1286	unsigned i;
1287	int ret = 0;
1288
1289	for_each_cache(ca, d->c, i) {
1290		q = bdev_get_queue(ca->bdev);
1291		ret |= bdi_congested(q->backing_dev_info, bits);
1292	}
1293
1294	return ret;
1295}
1296
1297void bch_flash_dev_request_init(struct bcache_device *d)
1298{
1299	struct gendisk *g = d->disk;
1300
1301	g->queue->make_request_fn		= flash_dev_make_request;
1302	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
1303	d->cache_miss				= flash_dev_cache_miss;
1304	d->ioctl				= flash_dev_ioctl;
1305}
1306
1307void bch_request_exit(void)
1308{
1309	if (bch_search_cache)
1310		kmem_cache_destroy(bch_search_cache);
1311}
1312
1313int __init bch_request_init(void)
1314{
1315	bch_search_cache = KMEM_CACHE(search, 0);
1316	if (!bch_search_cache)
1317		return -ENOMEM;
1318
1319	return 0;
1320}