// SPDX-License-Identifier: GPL-2.0
/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static CLOSURE_CALLBACK(bch_data_insert_start);

static unsigned int cache_mode(struct cached_dev *dc)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc)
{
	return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = bvec_kmap_local(&bv);

		csum = crc64_be(csum, d, bv.bv_len);
		kunmap_local(d);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}
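
/*
 * Note on the store above: the checksum lives in the u64 slot immediately
 * after the key's last pointer (k->ptr[KEY_PTRS(k)]), and the top bit is
 * masked off so the stored value fits in 63 bits.
 */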

/* Insert data into cache */

static CLOSURE_CALLBACK(bch_data_insert_keys)
{
	closure_type(op, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->status = BLK_STS_RESOURCE;
		op->insert_data_done = true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done) {
		continue_at(cl, bch_data_insert_start, op->wq);
		return;
	}

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: if we just return -ENOMEM
	 * here, bch_data_insert_keys() will insert the keys created so far
	 * and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c->cache) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}
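
/*
 * For a rough sense of scale: assuming a 4 KiB cache block size, the cap
 * above is block_bytes() minus the struct jset header, i.e. on the order
 * of 500 u64s - so a keylist gets flushed to the btree long before an
 * insert batch could outgrow an empty journal write.
 */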

static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu\n",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned int sectors = min(bio_sectors(bio),
					   1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector	+= sectors;
		bio->bi_iter.bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode,
				     bio->bi_iter.bi_sector,
				     sectors));
	}

	op->insert_data_done = true;
	/* get in bch_data_insert() */
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}
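
/*
 * The keys generated above carry no pointers; since a bcache key's offset
 * is the *end* of the extent it covers (note bi_sector is advanced before
 * KEY() is built), inserting a zero-pointer key simply punches a hole over
 * any cached data in that range.
 */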

static CLOSURE_CALLBACK(bch_data_insert_error)
{
	closure_type(op, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(&cl->work);
}

static void bch_data_insert_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (bio->bi_status) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->status = bio->bi_status;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}

static CLOSURE_CALLBACK(bch_data_insert_start)
{
	closure_type(op, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
		wake_up_gc(op->c);

	/*
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

	do {
		unsigned int i;
		struct bkey *k;
		struct bio_set *split = &op->c->bio_split;

		/* 1 for the device pointer and 1 for the checksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c)) {
			continue_at(cl, bch_data_insert_keys, op->wq);
			return;
		}

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		n->bi_opf = REQ_OP_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
	return;
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 * @cl: closure pointer.
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have
 * been added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
CLOSURE_CALLBACK(bch_data_insert)
{
	closure_type(op, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(&cl->work);
}
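
/*
 * Schematic caller, for illustration only (the real users live in the
 * writeback, flash-volume and moving-GC paths; cached_dev_write() below is
 * one of them):
 *
 *	op->c         = c;
 *	op->bio       = bio;
 *	op->inode     = inode;
 *	op->writeback = true;	(resulting keys are created dirty)
 *	closure_call(&op->cl, bch_data_insert, NULL, parent_cl);
 */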

/*
 * Congested?  Return 0 (not congested) or the limit (in sectors)
 * beyond which we should bypass the cache due to congestion.
 */
unsigned int bch_get_congested(const struct cache_set *c)
{
	int i;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	i -= hweight32(get_random_u32());

	return i > 0 ? i : 1;
}
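
/*
 * Rough shape of the math above: c->congested goes negative as I/Os exceed
 * their latency thresholds, and the time elapsed since the last congestion
 * event offsets it at about one unit per millisecond.  While the sum is
 * still negative, i lands in (0, CONGESTED_MAX), is mapped onto an
 * exponential curve by fract_exp_two(), and a small random amount (the
 * popcount of a random 32-bit word) is subtracted as jitter.  The result is
 * the sector count above which bios bypass the cache, so heavier congestion
 * yields a smaller cutoff.
 */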

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}
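
/*
 * ewma_add() above folds the task's just-finished sequential run into a
 * moving average weighted 7:1 in favour of history (roughly
 * avg = (7 * avg + sequential_io) / 8), then resets the counter for the
 * next run.
 */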

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned int mode = cache_mode(dc);
	unsigned int sectors, congested;
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio_op(bio) == REQ_OP_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     op_is_write(bio_op(bio))))
		goto skip;

	/*
	 * Whether a bio for read-ahead or background IO is bypassed depends
	 * on the following:
	 * - If the IO is for metadata, always cache it, no bypass.
	 * - Otherwise, check dc->cache_readahead_policy:
	 *	BCH_CACHE_READA_ALL: cache it, no bypass
	 *	BCH_CACHE_READA_META_ONLY: don't cache it, bypass
	 * That is, read-ahead requests for metadata always get cached
	 * (eg, for gfs2 or xfs).
	 */
	if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
		if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
		    (dc->cache_readahead_policy != BCH_CACHE_READA_ALL))
			goto skip;
	}

	if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
		pr_debug("skipping unaligned io\n");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if (get_random_u32_below(4) == 3)
			goto skip;
		else
			goto rescale;
	}

	congested = bch_get_congested(c);
	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential += bio->bi_iter.bi_size;

	i->last			= bio_end_sector(bio);
	i->jiffies		= jiffies + msecs_to_jiffies(5000);
	task->sequential_io	= i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}
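
/*
 * Sequential detection, by example: the io_hash/io_lru entries remember
 * where recent bios ended.  A bio that starts exactly where a previous one
 * ended (within the 5 second window) extends that entry's running byte
 * count, which is mirrored into task->sequential_io; once the larger of
 * that and the task's EWMA crosses dc->sequential_cutoff (4 MiB by
 * default), the whole stream is bypassed straight to the backing device.
 */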

/* Cache lookup */

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned int		insert_bio_sectors;
	unsigned int		recoverable:1;
	unsigned int		write:1;
	unsigned int		read_dirty_data:1;
	unsigned int		cache_missed:1;

	struct block_device	*orig_bdev;
	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};
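
/*
 * Lifetime sketch: a search is carved out of a mempool in search_alloc(),
 * s->cl acts as the parent closure for the whole request and s->iop.cl as
 * the child driving the lookup/insert paths; everything is torn down again
 * in search_free() once both have run to completion.
 */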

static void bch_cache_read_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data.  Set s->iop.status but leave the bio's own
	 * status alone, so the race isn't counted as an error against the
	 * cache device - we'll still reread the data from the backing device.
	 */

	if (bio->bi_status)
		s->iop.status = bio->bi_status;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.status = BLK_STS_IOERR;
	}

	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned int ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned int bio_sectors = bio_sectors(bio);
		unsigned int sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;
		int ret = s->d->cache_miss(b, s, bio, sectors);

		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, &s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}
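
/*
 * Return-value convention for the map function above: MAP_CONTINUE asks
 * bch_btree_map_keys() to move on to the next key because part of the bio
 * is still unserviced, while MAP_DONE means the final split consumed the
 * whole bio and the traversal can stop.
 */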

static CLOSURE_CALLBACK(cache_lookup)
{
	closure_type(s, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	struct cached_dev *dc;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN) {
		continue_at(cl, cache_lookup, bcache_wq);
		return;
	}

	/*
	 * We might hit an error when searching the btree.  If that happens,
	 * ret is negative and we must not recover data from the backing
	 * device (when the cache device is dirty), because we don't know
	 * whether the bkeys the read request covered are all clean.
	 *
	 * When that happens, s->iop.status is still the initial value it had
	 * before we submitted s->bio.bio.
	 */
	if (ret < 0) {
		BUG_ON(ret == -EINTR);
		if (s->d && s->d->c &&
		    !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
			dc = container_of(s->d, struct cached_dev, disk);
			if (dc && atomic_read(&dc->has_dirty))
				s->recoverable = false;
		}
		if (!s->iop.status)
			s->iop.status = BLK_STS_IOERR;
	}

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);

		s->iop.status = bio->bi_status;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void backing_request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);
		struct cached_dev *dc = container_of(s->d,
						     struct cached_dev, disk);
		/*
		 * If a bio has REQ_PREFLUSH for writeback mode, it is
		 * specially assembled in cached_dev_write() for a non-zero
		 * write request which has REQ_PREFLUSH.  We don't set
		 * s->iop.status on this failure; the status will be decided
		 * by the result of the bch_data_insert() operation.
		 */
		if (unlikely(s->iop.writeback &&
			     bio->bi_opf & REQ_PREFLUSH)) {
			pr_err("Can't flush %pg: returned bi_status %i\n",
			       dc->bdev, bio->bi_status);
		} else {
			/* set to orig_bio->bi_status in bio_complete() */
			s->iop.status = bio->bi_status;
		}
		s->recoverable = false;
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		/* Count on bcache device */
		bio_end_io_acct_remapped(s->orig_bio, s->start_time,
					 s->orig_bdev);
		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_status = s->iop.status;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s,
			struct bio *orig_bio,
			bio_end_io_t *end_io_fn)
{
	struct bio *bio = &s->bio.bio;

	bio_init_clone(orig_bio->bi_bdev, bio, orig_bio, GFP_NOIO);
	/*
	 * bi_end_io can be set separately somewhere else, e.g. the
	 * variants in,
	 * - cache_bio->bi_end_io from cached_dev_cache_miss()
	 * - n->bi_end_io from cache_lookup_fn()
	 */
	bio->bi_end_io		= end_io_fn;
	bio->bi_private		= &s->cl;

	bio_cnt_set(bio, 3);
}
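
/*
 * bio_cnt_set(bio, 3) primes the clone's reference count for the multiple
 * submit/complete cycles a search may put it through (for instance a cache
 * lookup followed by a retry against the backing device); each completion
 * path drops one reference with bio_put().
 */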

static CLOSURE_CALLBACK(search_free)
{
	closure_type(s, struct search, cl);

	atomic_dec(&s->iop.c->search_inflight);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	bio_complete(s);
	closure_debug_destroy(cl);
	mempool_free(s, &s->iop.c->search);
}

static inline struct search *search_alloc(struct bio *bio,
		struct bcache_device *d, struct block_device *orig_bdev,
		unsigned long start_time)
{
	struct search *s;

	s = mempool_alloc(&d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio, request_endio);
	atomic_inc(&d->c->search_inflight);

	s->orig_bio		= bio;
	s->cache_miss		= NULL;
	s->cache_missed		= 0;
	s->d			= d;
	s->recoverable		= 1;
	s->write		= op_is_write(bio_op(bio));
	s->read_dirty_data	= 0;
	/* Count on the bcache device */
	s->orig_bdev		= orig_bdev;
	s->start_time		= start_time;
	s->iop.c		= d->c;
	s->iop.bio		= NULL;
	s->iop.inode		= d->id;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->iop.write_prio	= 0;
	s->iop.status		= 0;
	s->iop.flags		= 0;
	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
	s->iop.wq		= bcache_wq;

	return s;
}

/* Cached devices */

static CLOSURE_CALLBACK(cached_dev_bio_complete)
{
	closure_type(s, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	cached_dev_put(dc);
	search_free(&cl->work);
}

/* Process reads */

static CLOSURE_CALLBACK(cached_dev_read_error_done)
{
	closure_type(s, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(&cl->work);
}

static CLOSURE_CALLBACK(cached_dev_read_error)
{
	closure_type(s, struct search, cl);
	struct bio *bio = &s->bio.bio;

	/*
	 * If the read request hit dirty data (s->read_dirty_data is true),
	 * then recovering a failed read request from the cache device may
	 * return stale data.  So read failure recovery is only permitted
	 * when the read request hit clean data in the cache device, or when
	 * a cache read race happened.
	 */
	if (s->recoverable && !s->read_dirty_data) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.status = 0;
		do_bio_hook(s, s->orig_bio, backing_request_endio);

		/* XXX: invalidate cache */

		/* I/O request sent to backing device */
		closure_bio_submit(s->iop.c, bio, cl);
	}

	continue_at(cl, cached_dev_read_error_done, NULL);
}

static CLOSURE_CALLBACK(cached_dev_cache_miss_done)
{
	closure_type(s, struct search, cl);
	struct bcache_device *d = s->d;

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(&cl->work);
	closure_put(&d->cl);
}

static CLOSURE_CALLBACK(cached_dev_read_done)
{
	closure_type(s, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ);
		s->iop.bio->bi_iter.bi_sector =
			s->cache_miss->bi_iter.bi_sector;
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bio_clone_blkg_association(s->iop.bio, s->cache_miss);
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	closure_get(&dc->disk.cl);
	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static CLOSURE_CALLBACK(cached_dev_read_done_bh)
{
	closure_type(s, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_missed, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);

	if (s->iop.status)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned int sectors)
{
	int ret = MAP_CONTINUE;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;
	unsigned int size_limit;

	s->cache_missed = 1;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	/* Limitation for valid replace key size and cache_bio bvecs number */
	size_limit = min_t(unsigned int, BIO_MAX_VECS * PAGE_SECTORS,
			   (1 << KEY_SIZE_BITS) - 1);
	s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio));

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO,
			      &s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(miss->bi_bdev,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			0, GFP_NOWAIT, &dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= backing_request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, cache_bio, &s->cl);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= backing_request_endio;
	miss->bi_private	= &s->cl;
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, miss, &s->cl);
	return ret;
}
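
/*
 * About size_limit above, assuming the common 4 KiB page size:
 * BIO_MAX_VECS * PAGE_SECTORS = 256 * 8 = 2048 sectors (1 MiB), which is
 * well under the (1 << KEY_SIZE_BITS) - 1 sector ceiling a replace key can
 * describe, so the bounce-buffer limit is normally what caps
 * insert_bio_sectors.
 */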

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static CLOSURE_CALLBACK(cached_dev_write_complete)
{
	closure_type(s, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(&cl->work);
}

static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bdev_max_discard_sectors(dc->bdev))
			goto insert_data;

		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);

	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_opf & REQ_PREFLUSH) {
			/*
			 * Also need to send a flush to the backing
			 * device.
			 */
			struct bio *flush;

			flush = bio_alloc_bioset(bio->bi_bdev, 0,
						 REQ_OP_WRITE | REQ_PREFLUSH,
						 GFP_NOIO, &dc->disk.bio_split);
			if (!flush) {
				s->iop.status = BLK_STS_RESOURCE;
				goto insert_data;
			}
			flush->bi_end_io = backing_request_endio;
			flush->bi_private = cl;
			/* I/O request sent to backing device */
			closure_bio_submit(s->iop.c, flush, cl);
		}
	} else {
		s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
					     &dc->disk.bio_split);
		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);
	}

insert_data:
	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}
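
/*
 * To summarize the three paths above: a bypassed write goes straight to
 * the backing device while zero-pointer keys invalidate the range in the
 * cache; a writeback write goes only to the cache (plus an explicit flush
 * of the backing device when REQ_PREFLUSH was set); and the default
 * writethrough case clones the bio so the data is written to the backing
 * device and inserted into the cache in parallel.
 */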

static CLOSURE_CALLBACK(cached_dev_nodata)
{
	closure_type(s, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	bio->bi_end_io = backing_request_endio;
	closure_bio_submit(s->iop.c, bio, cl);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

struct detached_dev_io_private {
	struct bcache_device	*d;
	unsigned long		start_time;
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
	struct block_device	*orig_bdev;
};

static void detached_dev_end_io(struct bio *bio)
{
	struct detached_dev_io_private *ddip;

	ddip = bio->bi_private;
	bio->bi_end_io = ddip->bi_end_io;
	bio->bi_private = ddip->bi_private;

	/* Count on the bcache device */
	bio_end_io_acct_remapped(bio, ddip->start_time, ddip->orig_bdev);

	if (bio->bi_status) {
		struct cached_dev *dc = container_of(ddip->d,
						     struct cached_dev, disk);
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	kfree(ddip);
	bio->bi_end_io(bio);
}

static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
		struct block_device *orig_bdev, unsigned long start_time)
{
	struct detached_dev_io_private *ddip;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	/*
	 * No need to call closure_get(&dc->disk.cl) here, because the upper
	 * layer has already opened the bcache device, which in turn called
	 * closure_get(&dc->disk.cl).
	 */
	ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
	if (!ddip) {
		bio->bi_status = BLK_STS_RESOURCE;
		bio->bi_end_io(bio);
		return;
	}

	ddip->d = d;
	/* Count on the bcache device */
	ddip->orig_bdev = orig_bdev;
	ddip->start_time = start_time;
	ddip->bi_end_io = bio->bi_end_io;
	ddip->bi_private = bio->bi_private;
	bio->bi_end_io = detached_dev_end_io;
	bio->bi_private = ddip;

	if ((bio_op(bio) == REQ_OP_DISCARD) &&
	    !bdev_max_discard_sectors(dc->bdev))
		bio->bi_end_io(bio);
	else
		submit_bio_noacct(bio);
}

static void quit_max_writeback_rate(struct cache_set *c,
				    struct cached_dev *this_dc)
{
	int i;
	struct bcache_device *d;
	struct cached_dev *dc;

	/*
	 * The mutex bch_register_lock may be contended by other parallel
	 * requesters, or by attach/detach operations on other backing
	 * devices.  Waiting for the mutex lock may increase I/O request
	 * latency for seconds or more.  To avoid such a situation, if
	 * mutex_trylock() fails, only the writeback rate of the current
	 * cached device is set to 1, and update_writeback_rate() will decide
	 * the writeback rate of the other cached devices (remember that
	 * c->idle_counter is 0 already).
	 */
	if (mutex_trylock(&bch_register_lock)) {
		for (i = 0; i < c->devices_max_used; i++) {
			if (!c->devices[i])
				continue;

			if (UUID_FLASH_ONLY(&c->uuids[i]))
				continue;

			d = c->devices[i];
			dc = container_of(d, struct cached_dev, disk);
			/*
			 * set writeback rate to default minimum value,
			 * then let update_writeback_rate() decide the
			 * upcoming rate.
			 */
			atomic_long_set(&dc->writeback_rate.rate, 1);
		}
		mutex_unlock(&bch_register_lock);
	} else
		atomic_long_set(&this_dc->writeback_rate.rate, 1);
}

/* Cached devices - read & write stuff */

void cached_dev_submit_bio(struct bio *bio)
{
	struct search *s;
	struct block_device *orig_bdev = bio->bi_bdev;
	struct bcache_device *d = orig_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	unsigned long start_time;
	int rw = bio_data_dir(bio);

	if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
		     dc->io_disable)) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}

	if (likely(d->c)) {
		if (atomic_read(&d->c->idle_counter))
			atomic_set(&d->c->idle_counter, 0);
		/*
		 * If at_max_writeback_rate of cache set is true and new I/O
		 * comes, quit max writeback rate of all cached devices
		 * attached to this cache set, and set at_max_writeback_rate
		 * to false.
		 */
		if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
			atomic_set(&d->c->at_max_writeback_rate, 0);
			quit_max_writeback_rate(d->c, dc);
		}
	}

	start_time = bio_start_io_acct(bio);

	bio_set_dev(bio, dc->bdev);
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d, orig_bdev, start_time);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * submit_bio_noacct
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else
		/* I/O request sent to backing device */
		detached_dev_do_request(d, bio, orig_bdev, start_time);
}

static int cached_dev_ioctl(struct bcache_device *d, blk_mode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	if (dc->io_disable)
		return -EIO;
	if (!dc->bdev->bd_disk->fops->ioctl)
		return -ENOTTY;
	return dc->bdev->bd_disk->fops->ioctl(dc->bdev, mode, cmd, arg);
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	dc->disk.cache_miss	= cached_dev_cache_miss;
	dc->disk.ioctl		= cached_dev_ioctl;
}

/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned int sectors)
{
	unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}
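
/*
 * Flash-only volumes return zeroes for unallocated ranges: the bi_size
 * swap above temporarily shrinks the bio so zero_fill_bio() touches only
 * the missed span, then bio_advance() steps past it.
 */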

static CLOSURE_CALLBACK(flash_dev_nodata)
{
	closure_type(s, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

void flash_dev_submit_bio(struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;

	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}

	s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio));
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under submit_bio_noacct
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return;
	} else if (bio_data_dir(bio)) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
		s->iop.writeback = true;
		s->iop.bio = bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}

static int flash_dev_ioctl(struct bcache_device *d, blk_mode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	d->cache_miss	= flash_dev_cache_miss;
	d->ioctl	= flash_dev_ioctl;
}

void bch_request_exit(void)
{
	kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Main bcache entry point - handle a read or a write request and decide what to
4 * do with it; the make_request functions are called by the block layer.
5 *
6 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
7 * Copyright 2012 Google, Inc.
8 */
9
10#include "bcache.h"
11#include "btree.h"
12#include "debug.h"
13#include "request.h"
14#include "writeback.h"
15
16#include <linux/module.h>
17#include <linux/hash.h>
18#include <linux/random.h>
19#include <linux/backing-dev.h>
20
21#include <trace/events/bcache.h>
22
23#define CUTOFF_CACHE_ADD 95
24#define CUTOFF_CACHE_READA 90
25
26struct kmem_cache *bch_search_cache;
27
28static void bch_data_insert_start(struct closure *cl);
29
30static unsigned int cache_mode(struct cached_dev *dc)
31{
32 return BDEV_CACHE_MODE(&dc->sb);
33}
34
35static bool verify(struct cached_dev *dc)
36{
37 return dc->verify;
38}
39
40static void bio_csum(struct bio *bio, struct bkey *k)
41{
42 struct bio_vec bv;
43 struct bvec_iter iter;
44 uint64_t csum = 0;
45
46 bio_for_each_segment(bv, bio, iter) {
47 void *d = kmap(bv.bv_page) + bv.bv_offset;
48
49 csum = bch_crc64_update(csum, d, bv.bv_len);
50 kunmap(bv.bv_page);
51 }
52
53 k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
54}
55
56/* Insert data into cache */
57
58static void bch_data_insert_keys(struct closure *cl)
59{
60 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
61 atomic_t *journal_ref = NULL;
62 struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
63 int ret;
64
65 /*
66 * If we're looping, might already be waiting on
67 * another journal write - can't wait on more than one journal write at
68 * a time
69 *
70 * XXX: this looks wrong
71 */
72#if 0
73 while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
74 closure_sync(&s->cl);
75#endif
76
77 if (!op->replace)
78 journal_ref = bch_journal(op->c, &op->insert_keys,
79 op->flush_journal ? cl : NULL);
80
81 ret = bch_btree_insert(op->c, &op->insert_keys,
82 journal_ref, replace_key);
83 if (ret == -ESRCH) {
84 op->replace_collision = true;
85 } else if (ret) {
86 op->status = BLK_STS_RESOURCE;
87 op->insert_data_done = true;
88 }
89
90 if (journal_ref)
91 atomic_dec_bug(journal_ref);
92
93 if (!op->insert_data_done) {
94 continue_at(cl, bch_data_insert_start, op->wq);
95 return;
96 }
97
98 bch_keylist_free(&op->insert_keys);
99 closure_return(cl);
100}
101
102static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
103 struct cache_set *c)
104{
105 size_t oldsize = bch_keylist_nkeys(l);
106 size_t newsize = oldsize + u64s;
107
108 /*
109 * The journalling code doesn't handle the case where the keys to insert
110 * is bigger than an empty write: If we just return -ENOMEM here,
111 * bch_data_insert_keys() will insert the keys created so far
112 * and finish the rest when the keylist is empty.
113 */
114 if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
115 return -ENOMEM;
116
117 return __bch_keylist_realloc(l, u64s);
118}
119
120static void bch_data_invalidate(struct closure *cl)
121{
122 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
123 struct bio *bio = op->bio;
124
125 pr_debug("invalidating %i sectors from %llu",
126 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
127
128 while (bio_sectors(bio)) {
129 unsigned int sectors = min(bio_sectors(bio),
130 1U << (KEY_SIZE_BITS - 1));
131
132 if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
133 goto out;
134
135 bio->bi_iter.bi_sector += sectors;
136 bio->bi_iter.bi_size -= sectors << 9;
137
138 bch_keylist_add(&op->insert_keys,
139 &KEY(op->inode,
140 bio->bi_iter.bi_sector,
141 sectors));
142 }
143
144 op->insert_data_done = true;
145 /* get in bch_data_insert() */
146 bio_put(bio);
147out:
148 continue_at(cl, bch_data_insert_keys, op->wq);
149}
150
151static void bch_data_insert_error(struct closure *cl)
152{
153 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
154
155 /*
156 * Our data write just errored, which means we've got a bunch of keys to
157 * insert that point to data that wasn't successfully written.
158 *
159 * We don't have to insert those keys but we still have to invalidate
160 * that region of the cache - so, if we just strip off all the pointers
161 * from the keys we'll accomplish just that.
162 */
163
164 struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
165
166 while (src != op->insert_keys.top) {
167 struct bkey *n = bkey_next(src);
168
169 SET_KEY_PTRS(src, 0);
170 memmove(dst, src, bkey_bytes(src));
171
172 dst = bkey_next(dst);
173 src = n;
174 }
175
176 op->insert_keys.top = dst;
177
178 bch_data_insert_keys(cl);
179}
180
181static void bch_data_insert_endio(struct bio *bio)
182{
183 struct closure *cl = bio->bi_private;
184 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
185
186 if (bio->bi_status) {
187 /* TODO: We could try to recover from this. */
188 if (op->writeback)
189 op->status = bio->bi_status;
190 else if (!op->replace)
191 set_closure_fn(cl, bch_data_insert_error, op->wq);
192 else
193 set_closure_fn(cl, NULL, NULL);
194 }
195
196 bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
197}
198
199static void bch_data_insert_start(struct closure *cl)
200{
201 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
202 struct bio *bio = op->bio, *n;
203
204 if (op->bypass)
205 return bch_data_invalidate(cl);
206
207 if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
208 wake_up_gc(op->c);
209
210 /*
211 * Journal writes are marked REQ_PREFLUSH; if the original write was a
212 * flush, it'll wait on the journal write.
213 */
214 bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
215
216 do {
217 unsigned int i;
218 struct bkey *k;
219 struct bio_set *split = &op->c->bio_split;
220
221 /* 1 for the device pointer and 1 for the chksum */
222 if (bch_keylist_realloc(&op->insert_keys,
223 3 + (op->csum ? 1 : 0),
224 op->c)) {
225 continue_at(cl, bch_data_insert_keys, op->wq);
226 return;
227 }
228
229 k = op->insert_keys.top;
230 bkey_init(k);
231 SET_KEY_INODE(k, op->inode);
232 SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
233
234 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
235 op->write_point, op->write_prio,
236 op->writeback))
237 goto err;
238
239 n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
240
241 n->bi_end_io = bch_data_insert_endio;
242 n->bi_private = cl;
243
244 if (op->writeback) {
245 SET_KEY_DIRTY(k, true);
246
247 for (i = 0; i < KEY_PTRS(k); i++)
248 SET_GC_MARK(PTR_BUCKET(op->c, k, i),
249 GC_MARK_DIRTY);
250 }
251
252 SET_KEY_CSUM(k, op->csum);
253 if (KEY_CSUM(k))
254 bio_csum(n, k);
255
256 trace_bcache_cache_insert(k);
257 bch_keylist_push(&op->insert_keys);
258
259 bio_set_op_attrs(n, REQ_OP_WRITE, 0);
260 bch_submit_bbio(n, op->c, k, 0);
261 } while (n != bio);
262
263 op->insert_data_done = true;
264 continue_at(cl, bch_data_insert_keys, op->wq);
265 return;
266err:
267 /* bch_alloc_sectors() blocks if s->writeback = true */
268 BUG_ON(op->writeback);
269
270 /*
271 * But if it's not a writeback write we'd rather just bail out if
272 * there aren't any buckets ready to write to - it might take awhile and
273 * we might be starving btree writes for gc or something.
274 */
275
276 if (!op->replace) {
277 /*
278 * Writethrough write: We can't complete the write until we've
279 * updated the index. But we don't want to delay the write while
280 * we wait for buckets to be freed up, so just invalidate the
281 * rest of the write.
282 */
283 op->bypass = true;
284 return bch_data_invalidate(cl);
285 } else {
286 /*
287 * From a cache miss, we can just insert the keys for the data
288 * we have written or bail out if we didn't do anything.
289 */
290 op->insert_data_done = true;
291 bio_put(bio);
292
293 if (!bch_keylist_empty(&op->insert_keys))
294 continue_at(cl, bch_data_insert_keys, op->wq);
295 else
296 closure_return(cl);
297 }
298}
299
300/**
301 * bch_data_insert - stick some data in the cache
302 * @cl: closure pointer.
303 *
304 * This is the starting point for any data to end up in a cache device; it could
305 * be from a normal write, or a writeback write, or a write to a flash only
306 * volume - it's also used by the moving garbage collector to compact data in
307 * mostly empty buckets.
308 *
309 * It first writes the data to the cache, creating a list of keys to be inserted
310 * (if the data had to be fragmented there will be multiple keys); after the
311 * data is written it calls bch_journal, and after the keys have been added to
312 * the next journal write they're inserted into the btree.
313 *
314 * It inserts the data in op->bio; bi_sector is used for the key offset,
315 * and op->inode is used for the key inode.
316 *
317 * If op->bypass is true, instead of inserting the data it invalidates the
318 * region of the cache represented by op->bio and op->inode.
319 */
320void bch_data_insert(struct closure *cl)
321{
322 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
323
324 trace_bcache_write(op->c, op->inode, op->bio,
325 op->writeback, op->bypass);
326
327 bch_keylist_init(&op->insert_keys);
328 bio_get(op->bio);
329 bch_data_insert_start(cl);
330}
331
332/*
333 * Congested? Return 0 (not congested) or the limit (in sectors)
334 * beyond which we should bypass the cache due to congestion.
335 */
336unsigned int bch_get_congested(const struct cache_set *c)
337{
338 int i;
339
340 if (!c->congested_read_threshold_us &&
341 !c->congested_write_threshold_us)
342 return 0;
343
344 i = (local_clock_us() - c->congested_last_us) / 1024;
345 if (i < 0)
346 return 0;
347
348 i += atomic_read(&c->congested);
349 if (i >= 0)
350 return 0;
351
352 i += CONGESTED_MAX;
353
354 if (i > 0)
355 i = fract_exp_two(i, 6);
356
357 i -= hweight32(get_random_u32());
358
359 return i > 0 ? i : 1;
360}
361
362static void add_sequential(struct task_struct *t)
363{
364 ewma_add(t->sequential_io_avg,
365 t->sequential_io, 8, 0);
366
367 t->sequential_io = 0;
368}
369
370static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
371{
372 return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
373}
374
375static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
376{
377 struct cache_set *c = dc->disk.c;
378 unsigned int mode = cache_mode(dc);
379 unsigned int sectors, congested;
380 struct task_struct *task = current;
381 struct io *i;
382
383 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
384 c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
385 (bio_op(bio) == REQ_OP_DISCARD))
386 goto skip;
387
388 if (mode == CACHE_MODE_NONE ||
389 (mode == CACHE_MODE_WRITEAROUND &&
390 op_is_write(bio_op(bio))))
391 goto skip;
392
393 /*
394 * Flag for bypass if the IO is for read-ahead or background,
395 * unless the read-ahead request is for metadata
396 * (eg, for gfs2 or xfs).
397 */
398 if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
399 !(bio->bi_opf & (REQ_META|REQ_PRIO)))
400 goto skip;
401
402 if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
403 bio_sectors(bio) & (c->sb.block_size - 1)) {
404 pr_debug("skipping unaligned io");
405 goto skip;
406 }
407
408 if (bypass_torture_test(dc)) {
409 if ((get_random_int() & 3) == 3)
410 goto skip;
411 else
412 goto rescale;
413 }
414
415 congested = bch_get_congested(c);
416 if (!congested && !dc->sequential_cutoff)
417 goto rescale;
418
419 spin_lock(&dc->io_lock);
420
421 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
422 if (i->last == bio->bi_iter.bi_sector &&
423 time_before(jiffies, i->jiffies))
424 goto found;
425
426 i = list_first_entry(&dc->io_lru, struct io, lru);
427
428 add_sequential(task);
429 i->sequential = 0;
430found:
431 if (i->sequential + bio->bi_iter.bi_size > i->sequential)
432 i->sequential += bio->bi_iter.bi_size;
433
434 i->last = bio_end_sector(bio);
435 i->jiffies = jiffies + msecs_to_jiffies(5000);
436 task->sequential_io = i->sequential;
437
438 hlist_del(&i->hash);
439 hlist_add_head(&i->hash, iohash(dc, i->last));
440 list_move_tail(&i->lru, &dc->io_lru);
441
442 spin_unlock(&dc->io_lock);
443
444 sectors = max(task->sequential_io,
445 task->sequential_io_avg) >> 9;
446
447 if (dc->sequential_cutoff &&
448 sectors >= dc->sequential_cutoff >> 9) {
449 trace_bcache_bypass_sequential(bio);
450 goto skip;
451 }
452
453 if (congested && sectors >= congested) {
454 trace_bcache_bypass_congested(bio);
455 goto skip;
456 }
457
458rescale:
459 bch_rescale_priorities(c, bio_sectors(bio));
460 return false;
461skip:
462 bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
463 return true;
464}
465
466/* Cache lookup */
467
468struct search {
469 /* Stack frame for bio_complete */
470 struct closure cl;
471
472 struct bbio bio;
473 struct bio *orig_bio;
474 struct bio *cache_miss;
475 struct bcache_device *d;
476
477 unsigned int insert_bio_sectors;
478 unsigned int recoverable:1;
479 unsigned int write:1;
480 unsigned int read_dirty_data:1;
481 unsigned int cache_missed:1;
482
483 unsigned long start_time;
484
485 struct btree_op op;
486 struct data_insert_op iop;
487};
488
489static void bch_cache_read_endio(struct bio *bio)
490{
491 struct bbio *b = container_of(bio, struct bbio, bio);
492 struct closure *cl = bio->bi_private;
493 struct search *s = container_of(cl, struct search, cl);
494
495 /*
496 * If the bucket was reused while our bio was in flight, we might have
497 * read the wrong data. Set s->error but not error so it doesn't get
498 * counted against the cache device, but we'll still reread the data
499 * from the backing device.
500 */
501
502 if (bio->bi_status)
503 s->iop.status = bio->bi_status;
504 else if (!KEY_DIRTY(&b->key) &&
505 ptr_stale(s->iop.c, &b->key, 0)) {
506 atomic_long_inc(&s->iop.c->cache_read_races);
507 s->iop.status = BLK_STS_IOERR;
508 }
509
510 bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
511}
512
513/*
514 * Read from a single key, handling the initial cache miss if the key starts in
515 * the middle of the bio
516 */
517static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
518{
519 struct search *s = container_of(op, struct search, op);
520 struct bio *n, *bio = &s->bio.bio;
521 struct bkey *bio_key;
522 unsigned int ptr;
523
524 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
525 return MAP_CONTINUE;
526
527 if (KEY_INODE(k) != s->iop.inode ||
528 KEY_START(k) > bio->bi_iter.bi_sector) {
529 unsigned int bio_sectors = bio_sectors(bio);
530 unsigned int sectors = KEY_INODE(k) == s->iop.inode
531 ? min_t(uint64_t, INT_MAX,
532 KEY_START(k) - bio->bi_iter.bi_sector)
533 : INT_MAX;
534 int ret = s->d->cache_miss(b, s, bio, sectors);
535
536 if (ret != MAP_CONTINUE)
537 return ret;
538
539 /* if this was a complete miss we shouldn't get here */
540 BUG_ON(bio_sectors <= sectors);
541 }
542
543 if (!KEY_SIZE(k))
544 return MAP_CONTINUE;
545
546 /* XXX: figure out best pointer - for multiple cache devices */
547 ptr = 0;
548
549 PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
550
551 if (KEY_DIRTY(k))
552 s->read_dirty_data = true;
553
554 n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
555 KEY_OFFSET(k) - bio->bi_iter.bi_sector),
556 GFP_NOIO, &s->d->bio_split);
557
558 bio_key = &container_of(n, struct bbio, bio)->key;
559 bch_bkey_copy_single_ptr(bio_key, k, ptr);
560
561 bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
562 bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
563
564 n->bi_end_io = bch_cache_read_endio;
565 n->bi_private = &s->cl;
566
567 /*
568 * The bucket we're reading from might be reused while our bio
569 * is in flight, and we could then end up reading the wrong
570 * data.
571 *
572 * We guard against this by checking (in cache_read_endio()) if
573 * the pointer is stale again; if so, we treat it as an error
574 * and reread from the backing device (but we don't pass that
575 * error up anywhere).
576 */
577
578 __bch_submit_bbio(n, b->c);
579 return n == bio ? MAP_DONE : MAP_CONTINUE;
580}
581
582static void cache_lookup(struct closure *cl)
583{
584 struct search *s = container_of(cl, struct search, iop.cl);
585 struct bio *bio = &s->bio.bio;
586 struct cached_dev *dc;
587 int ret;
588
589 bch_btree_op_init(&s->op, -1);
590
591 ret = bch_btree_map_keys(&s->op, s->iop.c,
592 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
593 cache_lookup_fn, MAP_END_KEY);
594 if (ret == -EAGAIN) {
595 continue_at(cl, cache_lookup, bcache_wq);
596 return;
597 }
598
599 /*
600 * We might meet err when searching the btree, If that happens, we will
601 * get negative ret, in this scenario we should not recover data from
602 * backing device (when cache device is dirty) because we don't know
603 * whether bkeys the read request covered are all clean.
604 *
605 * And after that happened, s->iop.status is still its initial value
606 * before we submit s->bio.bio
607 */
608 if (ret < 0) {
609 BUG_ON(ret == -EINTR);
610 if (s->d && s->d->c &&
611 !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
612 dc = container_of(s->d, struct cached_dev, disk);
613 if (dc && atomic_read(&dc->has_dirty))
614 s->recoverable = false;
615 }
616 if (!s->iop.status)
617 s->iop.status = BLK_STS_IOERR;
618 }
619
620 closure_return(cl);
621}
622
623/* Common code for the make_request functions */
624
625static void request_endio(struct bio *bio)
626{
627 struct closure *cl = bio->bi_private;
628
629 if (bio->bi_status) {
630 struct search *s = container_of(cl, struct search, cl);
631
632 s->iop.status = bio->bi_status;
633 /* Only cache read errors are recoverable */
634 s->recoverable = false;
635 }
636
637 bio_put(bio);
638 closure_put(cl);
639}
640
641static void backing_request_endio(struct bio *bio)
642{
643 struct closure *cl = bio->bi_private;
644
645 if (bio->bi_status) {
646 struct search *s = container_of(cl, struct search, cl);
647 struct cached_dev *dc = container_of(s->d,
648 struct cached_dev, disk);
649 /*
650 * If a bio has REQ_PREFLUSH for writeback mode, it is
651 * speically assembled in cached_dev_write() for a non-zero
652 * write request which has REQ_PREFLUSH. we don't set
653 * s->iop.status by this failure, the status will be decided
654 * by result of bch_data_insert() operation.
655 */
656 if (unlikely(s->iop.writeback &&
657 bio->bi_opf & REQ_PREFLUSH)) {
658 pr_err("Can't flush %s: returned bi_status %i",
659 dc->backing_dev_name, bio->bi_status);
660 } else {
661 /* set to orig_bio->bi_status in bio_complete() */
662 s->iop.status = bio->bi_status;
663 }
664 s->recoverable = false;
665 /* should count I/O error for backing device here */
666 bch_count_backing_io_errors(dc, bio);
667 }
668
669 bio_put(bio);
670 closure_put(cl);
671}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
				    &s->d->disk->part0, s->start_time);

		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_status = s->iop.status;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
}

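/*
 * Prepare the search's embedded bio: clone orig_bio's geometry, attach
 * the given endio and the search's closure, and prime the bio's
 * reference count for the submissions that follow.
 */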
static void do_bio_hook(struct search *s,
			struct bio *orig_bio,
			bio_end_io_t *end_io_fn)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio, NULL, 0);
	__bio_clone_fast(bio, orig_bio);
	/*
	 * bi_end_io can be overridden separately elsewhere, e.g.:
	 * - cache_bio->bi_end_io in cached_dev_cache_miss()
	 * - n->bi_end_io in cache_lookup_fn()
	 */
	bio->bi_end_io = end_io_fn;
	bio->bi_private = &s->cl;

	bio_cnt_set(bio, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	atomic_dec(&s->iop.c->search_inflight);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	bio_complete(s);
	closure_debug_destroy(cl);
	mempool_free(s, &s->iop.c->search);
}

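/*
 * Allocate and initialize a search for bio: take it from the cache set's
 * mempool (GFP_NOIO allocation from a mempool, so it may block but won't
 * fail), hook the cloned bio up with request_endio, and seed the data
 * insert op from the request's flags.
 */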
static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(&d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio, request_endio);
	atomic_inc(&d->c->search_inflight);

	s->orig_bio = bio;
	s->cache_miss = NULL;
	s->cache_missed = 0;
	s->d = d;
	s->recoverable = 1;
	s->write = op_is_write(bio_op(bio));
	s->read_dirty_data = 0;
	s->start_time = jiffies;

	s->iop.c = d->c;
	s->iop.bio = NULL;
	s->iop.inode = d->id;
	s->iop.write_point = hash_long((unsigned long) current, 16);
	s->iop.write_prio = 0;
	s->iop.status = 0;
	s->iop.flags = 0;
	s->iop.flush_journal = op_is_flush(bio->bi_opf);
	s->iop.wq = bcache_wq;

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	cached_dev_put(dc);
	search_free(cl);
}

/* Process reads */

static void cached_dev_read_error_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
}

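/*
 * A read from the cache device failed. If it's safe to do so (see the
 * comment below), resubmit the original request to the backing device;
 * either way, finish up in cached_dev_read_error_done().
 */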
static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	/*
	 * If the read request hit dirty data (s->read_dirty_data is true),
	 * then retrying the failed read from the backing device may
	 * return stale data. So read failure recovery is only permitted
	 * when the read request hit clean data in the cache device, or
	 * when a cache read race happened.
	 */
	if (s->recoverable && !s->read_dirty_data) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.status = 0;
		do_bio_hook(s, s->orig_bio, backing_request_endio);

		/* XXX: invalidate cache */

		/* I/O request sent to backing device */
		closure_bio_submit(s->iop.c, bio, cl);
	}

	continue_at(cl, cached_dev_read_error_done, NULL);
}

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bcache_device *d = s->d;

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
	closure_put(&d->cl);
}

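/*
 * Runs after a cache miss read has completed: copy the data from the
 * bounce bio into the original request, complete the original bio, and
 * (unless the cache set is stopping) insert the freshly read data into
 * the cache via bch_data_insert().
 */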
static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector =
			s->cache_miss->bi_iter.bi_sector;
		bio_copy_dev(s->iop.bio, s->cache_miss);
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	closure_get(&dc->disk.cl);
	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

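/*
 * First continuation after the cache lookup finishes; can run from bio
 * completion context (hence the _bh suffix). Update cache accounting and
 * tracing here, then bounce the error and read-done paths to bcache_wq;
 * the plain hit path completes inline.
 */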
static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_missed, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);

	if (s->iop.status)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

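/*
 * Handle a cache miss: read the missing extent (plus readahead, when
 * allowed) from the backing device into a bounce bio, after reserving
 * the range in the btree with a replace key so the data can later be
 * inserted without racing against concurrent writes. Bypassed requests
 * and repeated misses go straight to the backing device.
 */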
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned int sectors)
{
	int ret = MAP_CONTINUE;
	unsigned int reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	s->cache_missed = 1;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_opf & REQ_RAHEAD) &&
	    !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      get_capacity(bio->bi_disk) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			&dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
	bio_copy_dev(cache_bio, miss);
	cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;

	cache_bio->bi_end_io = backing_request_endio;
	cache_bio->bi_private = &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss = miss;
	s->iop.bio = cache_bio;
	bio_get(cache_bio);
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, cache_bio, &s->cl);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io = backing_request_endio;
	miss->bi_private = &s->cl;
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, miss, &s->cl);
	return ret;
}

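/*
 * Entry point for reads: kick off the btree lookup, then finish in
 * cached_dev_read_done_bh() once all the resulting I/O has completed.
 */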
static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

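/*
 * Entry point for writes. Decide how the write is serviced: bypass sends
 * it to the backing device only, writeback puts it in the cache only
 * (the writeback thread flushes it later), and the default writethrough
 * path writes the backing device and the cache in parallel.
 */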
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback; force this write to writeback mode.
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok.
	 *
	 * But check_overlapping drops dirty keys for which I/O hasn't
	 * started, so we still want to call it.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			goto insert_data;

		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);

	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_opf & REQ_PREFLUSH) {
			/*
			 * Also need to send a flush to the backing
			 * device.
			 */
			struct bio *flush;

			flush = bio_alloc_bioset(GFP_NOIO, 0,
						 &dc->disk.bio_split);
			if (!flush) {
				s->iop.status = BLK_STS_RESOURCE;
				goto insert_data;
			}
			bio_copy_dev(flush, bio);
			flush->bi_end_io = backing_request_endio;
			flush->bi_private = cl;
			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			/* I/O request sent to backing device */
			closure_bio_submit(s->iop.c, flush, cl);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);
	}

insert_data:
	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	bio->bi_end_io = backing_request_endio;
	closure_bio_submit(s->iop.c, bio, cl);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

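/*
 * Detached devices: when the backing device has no usable cache attached,
 * requests are passed straight through to it. The private struct below
 * saves the original endio and start time so completion can restore them
 * and record I/O accounting.
 */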
struct detached_dev_io_private {
	struct bcache_device *d;
	unsigned long start_time;
	bio_end_io_t *bi_end_io;
	void *bi_private;
};

static void detached_dev_end_io(struct bio *bio)
{
	struct detached_dev_io_private *ddip;

	ddip = bio->bi_private;
	bio->bi_end_io = ddip->bi_end_io;
	bio->bi_private = ddip->bi_private;

	generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
			    &ddip->d->disk->part0, ddip->start_time);

	if (bio->bi_status) {
		struct cached_dev *dc = container_of(ddip->d,
						     struct cached_dev, disk);
		/* count this I/O error against the backing device */
		bch_count_backing_io_errors(dc, bio);
	}

	kfree(ddip);
	bio->bi_end_io(bio);
}

static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
{
	struct detached_dev_io_private *ddip;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	/*
	 * No need to call closure_get(&dc->disk.cl) here, because the
	 * upper layer has already opened the bcache device, which
	 * already called closure_get(&dc->disk.cl).
	 */
	ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
	if (!ddip) {
		/* kzalloc() can fail; complete the bio with an error */
		bio->bi_status = BLK_STS_RESOURCE;
		bio->bi_end_io(bio);
		return;
	}
	ddip->d = d;
	ddip->start_time = jiffies;
	ddip->bi_end_io = bio->bi_end_io;
	ddip->bi_private = bio->bi_private;
	bio->bi_end_io = detached_dev_end_io;
	bio->bi_private = ddip;

	if ((bio_op(bio) == REQ_OP_DISCARD) &&
	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
		bio->bi_end_io(bio);
	else
		generic_make_request(bio);
}

static void quit_max_writeback_rate(struct cache_set *c,
				    struct cached_dev *this_dc)
{
	int i;
	struct bcache_device *d;
	struct cached_dev *dc;

	/*
	 * The bch_register_lock mutex may be contended by other parallel
	 * requesters, or by attach/detach operations on other backing
	 * devices. Waiting for the mutex lock may increase I/O request
	 * latency for seconds or more. To avoid such a situation, if
	 * mutex_trylock() fails, only the writeback rate of the current
	 * cached device is set to 1, and __update_writeback_rate() will
	 * decide the writeback rate of the other cached devices (remember
	 * that c->idle_counter is already 0).
	 */
	if (mutex_trylock(&bch_register_lock)) {
		for (i = 0; i < c->devices_max_used; i++) {
			if (!c->devices[i])
				continue;

			if (UUID_FLASH_ONLY(&c->uuids[i]))
				continue;

			d = c->devices[i];
			dc = container_of(d, struct cached_dev, disk);
			/*
			 * Set the writeback rate to the default minimum
			 * value, then let update_writeback_rate() decide
			 * the upcoming rate.
			 */
			atomic_long_set(&dc->writeback_rate.rate, 1);
		}
		mutex_unlock(&bch_register_lock);
	} else
		atomic_long_set(&this_dc->writeback_rate.rate, 1);
}

/* Cached devices - read & write stuff */

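/*
 * make_request entry point for bcache devices backed by a cached_dev.
 * Handles I/O-disable and idle/writeback-rate bookkeeping, remaps the
 * bio onto the backing device, then routes it: detached devices get a
 * plain passthrough, empty bios take the nodata path, and everything
 * else goes through cached_dev_read()/cached_dev_write().
 */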
static blk_qc_t cached_dev_make_request(struct request_queue *q,
					struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int rw = bio_data_dir(bio);

	if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
		     dc->io_disable)) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	if (likely(d->c)) {
		if (atomic_read(&d->c->idle_counter))
			atomic_set(&d->c->idle_counter, 0);
		/*
		 * If at_max_writeback_rate of the cache set is true and new
		 * I/O comes in, quit the max writeback rate of all cached
		 * devices attached to this cache set, and set
		 * at_max_writeback_rate to false.
		 */
		if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
			atomic_set(&d->c->at_max_writeback_rate, 0);
			quit_max_writeback_rate(d->c, dc);
		}
	}

	generic_start_io_acct(q,
			      bio_op(bio),
			      bio_sectors(bio),
			      &d->disk->part0);

	bio_set_dev(bio, dc->bdev);
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else
		/* I/O request sent to backing device */
		detached_dev_do_request(d, bio);

	return BLK_QC_T_NONE;
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	if (dc->io_disable)
		return -EIO;

	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned int i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn = cached_dev_make_request;
	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
	dc->disk.cache_miss = cached_dev_cache_miss;
	dc->disk.ioctl = cached_dev_ioctl;
}

/* Flash backed devices */

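/*
 * Flash-only volumes have no backing device, so a "cache miss" just means
 * the extent was never written: zero-fill the missing part of the bio and
 * keep walking the keys.
 */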
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned int sectors)
{
	unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static blk_qc_t flash_dev_make_request(struct request_queue *q,
				       struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_disk->private_data;

	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return BLK_QC_T_NONE;
	} else if (bio_data_dir(bio)) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
		s->iop.writeback = true;
		s->iop.bio = bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
	return BLK_QC_T_NONE;
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned int i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn = flash_dev_make_request;
	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
	d->cache_miss = flash_dev_cache_miss;
	d->ioctl = flash_dev_ioctl;
}

void bch_request_exit(void)
{
	kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}