1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Main bcache entry point - handle a read or a write request and decide what to
4 * do with it; the make_request functions are called by the block layer.
5 *
6 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
7 * Copyright 2012 Google, Inc.
8 */
9
10#include "bcache.h"
11#include "btree.h"
12#include "debug.h"
13#include "request.h"
14#include "writeback.h"
15
16#include <linux/module.h>
17#include <linux/hash.h>
18#include <linux/random.h>
19#include <linux/backing-dev.h>
20
21#include <trace/events/bcache.h>
22
23#define CUTOFF_CACHE_ADD 95
24#define CUTOFF_CACHE_READA 90
25
26struct kmem_cache *bch_search_cache;
27
28static void bch_data_insert_start(struct closure *cl);
29
30static unsigned int cache_mode(struct cached_dev *dc)
31{
32 return BDEV_CACHE_MODE(&dc->sb);
33}
34
35static bool verify(struct cached_dev *dc)
36{
37 return dc->verify;
38}
39
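/*
 * Checksum the bio's data with a 64-bit CRC and store the result in the
 * key's first unused pointer slot (with the top bit masked off).
 */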
40static void bio_csum(struct bio *bio, struct bkey *k)
41{
42 struct bio_vec bv;
43 struct bvec_iter iter;
44 uint64_t csum = 0;
45
46 bio_for_each_segment(bv, bio, iter) {
47 void *d = kmap(bv.bv_page) + bv.bv_offset;
48
49 csum = bch_crc64_update(csum, d, bv.bv_len);
50 kunmap(bv.bv_page);
51 }
52
53 k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
54}
55
56/* Insert data into cache */
57
58static void bch_data_insert_keys(struct closure *cl)
59{
60 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
61 atomic_t *journal_ref = NULL;
62 struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
63 int ret;
64
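	/*
	 * Journal the keys first (replace operations from cache misses are
	 * not journalled - losing them only means the data isn't cached),
	 * then insert them into the btree. journal_ref pins the journal
	 * entry until the btree insert has completed.
	 */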
65 if (!op->replace)
66 journal_ref = bch_journal(op->c, &op->insert_keys,
67 op->flush_journal ? cl : NULL);
68
69 ret = bch_btree_insert(op->c, &op->insert_keys,
70 journal_ref, replace_key);
71 if (ret == -ESRCH) {
72 op->replace_collision = true;
73 } else if (ret) {
74 op->status = BLK_STS_RESOURCE;
75 op->insert_data_done = true;
76 }
77
78 if (journal_ref)
79 atomic_dec_bug(journal_ref);
80
81 if (!op->insert_data_done) {
82 continue_at(cl, bch_data_insert_start, op->wq);
83 return;
84 }
85
86 bch_keylist_free(&op->insert_keys);
87 closure_return(cl);
88}
89
90static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
91 struct cache_set *c)
92{
93 size_t oldsize = bch_keylist_nkeys(l);
94 size_t newsize = oldsize + u64s;
95
96 /*
97	 * The journalling code doesn't handle the case where the keys to insert
98	 * are bigger than an empty write: If we just return -ENOMEM here,
99 * bch_data_insert_keys() will insert the keys created so far
100 * and finish the rest when the keylist is empty.
101 */
102 if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
103 return -ENOMEM;
104
105 return __bch_keylist_realloc(l, u64s);
106}
107
108static void bch_data_invalidate(struct closure *cl)
109{
110 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
111 struct bio *bio = op->bio;
112
113 pr_debug("invalidating %i sectors from %llu\n",
114 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
115
116 while (bio_sectors(bio)) {
117 unsigned int sectors = min(bio_sectors(bio),
118 1U << (KEY_SIZE_BITS - 1));
119
120 if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
121 goto out;
122
123 bio->bi_iter.bi_sector += sectors;
124 bio->bi_iter.bi_size -= sectors << 9;
125
126 bch_keylist_add(&op->insert_keys,
127 &KEY(op->inode,
128 bio->bi_iter.bi_sector,
129 sectors));
130 }
131
132 op->insert_data_done = true;
133	/* the bio was bio_get()'d in bch_data_insert(), drop that ref here */
134 bio_put(bio);
135out:
136 continue_at(cl, bch_data_insert_keys, op->wq);
137}
138
139static void bch_data_insert_error(struct closure *cl)
140{
141 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
142
143 /*
144 * Our data write just errored, which means we've got a bunch of keys to
145 * insert that point to data that wasn't successfully written.
146 *
147 * We don't have to insert those keys but we still have to invalidate
148 * that region of the cache - so, if we just strip off all the pointers
149 * from the keys we'll accomplish just that.
150 */
151
152 struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
153
154 while (src != op->insert_keys.top) {
155 struct bkey *n = bkey_next(src);
156
157 SET_KEY_PTRS(src, 0);
158 memmove(dst, src, bkey_bytes(src));
159
160 dst = bkey_next(dst);
161 src = n;
162 }
163
164 op->insert_keys.top = dst;
165
166 bch_data_insert_keys(cl);
167}
168
169static void bch_data_insert_endio(struct bio *bio)
170{
171 struct closure *cl = bio->bi_private;
172 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
173
174 if (bio->bi_status) {
175 /* TODO: We could try to recover from this. */
176 if (op->writeback)
177 op->status = bio->bi_status;
178 else if (!op->replace)
179 set_closure_fn(cl, bch_data_insert_error, op->wq);
180 else
181 set_closure_fn(cl, NULL, NULL);
182 }
183
184 bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
185}
186
187static void bch_data_insert_start(struct closure *cl)
188{
189 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
190 struct bio *bio = op->bio, *n;
191
192 if (op->bypass)
193 return bch_data_invalidate(cl);
194
195 if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
196 wake_up_gc(op->c);
197
198 /*
199 * Journal writes are marked REQ_PREFLUSH; if the original write was a
200 * flush, it'll wait on the journal write.
201 */
202 bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
203
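	/*
	 * Write the data out in one or more chunks: each pass allocates
	 * sectors on the cache device, builds a key describing them, splits
	 * that much off the front of the bio and submits it, until the whole
	 * bio has been written.
	 */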
204 do {
205 unsigned int i;
206 struct bkey *k;
207 struct bio_set *split = &op->c->bio_split;
208
209 /* 1 for the device pointer and 1 for the chksum */
210 if (bch_keylist_realloc(&op->insert_keys,
211 3 + (op->csum ? 1 : 0),
212 op->c)) {
213 continue_at(cl, bch_data_insert_keys, op->wq);
214 return;
215 }
216
217 k = op->insert_keys.top;
218 bkey_init(k);
219 SET_KEY_INODE(k, op->inode);
220 SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
221
222 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
223 op->write_point, op->write_prio,
224 op->writeback))
225 goto err;
226
227 n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
228
229 n->bi_end_io = bch_data_insert_endio;
230 n->bi_private = cl;
231
232 if (op->writeback) {
233 SET_KEY_DIRTY(k, true);
234
235 for (i = 0; i < KEY_PTRS(k); i++)
236 SET_GC_MARK(PTR_BUCKET(op->c, k, i),
237 GC_MARK_DIRTY);
238 }
239
240 SET_KEY_CSUM(k, op->csum);
241 if (KEY_CSUM(k))
242 bio_csum(n, k);
243
244 trace_bcache_cache_insert(k);
245 bch_keylist_push(&op->insert_keys);
246
247 bio_set_op_attrs(n, REQ_OP_WRITE, 0);
248 bch_submit_bbio(n, op->c, k, 0);
249 } while (n != bio);
250
251 op->insert_data_done = true;
252 continue_at(cl, bch_data_insert_keys, op->wq);
253 return;
254err:
255	/* bch_alloc_sectors() blocks if op->writeback is true */
256 BUG_ON(op->writeback);
257
258 /*
259 * But if it's not a writeback write we'd rather just bail out if
260 * there aren't any buckets ready to write to - it might take awhile and
261 * we might be starving btree writes for gc or something.
262 */
263
264 if (!op->replace) {
265 /*
266 * Writethrough write: We can't complete the write until we've
267 * updated the index. But we don't want to delay the write while
268 * we wait for buckets to be freed up, so just invalidate the
269 * rest of the write.
270 */
271 op->bypass = true;
272 return bch_data_invalidate(cl);
273 } else {
274 /*
275 * From a cache miss, we can just insert the keys for the data
276 * we have written or bail out if we didn't do anything.
277 */
278 op->insert_data_done = true;
279 bio_put(bio);
280
281 if (!bch_keylist_empty(&op->insert_keys))
282 continue_at(cl, bch_data_insert_keys, op->wq);
283 else
284 closure_return(cl);
285 }
286}
287
288/**
289 * bch_data_insert - stick some data in the cache
290 * @cl: closure pointer.
291 *
292 * This is the starting point for any data to end up in a cache device; it could
293 * be from a normal write, or a writeback write, or a write to a flash only
294 * volume - it's also used by the moving garbage collector to compact data in
295 * mostly empty buckets.
296 *
297 * It first writes the data to the cache, creating a list of keys to be inserted
298 * (if the data had to be fragmented there will be multiple keys); after the
299 * data is written it calls bch_journal, and after the keys have been added to
300 * the next journal write they're inserted into the btree.
301 *
302 * It inserts the data in op->bio; bi_sector is used for the key offset,
303 * and op->inode is used for the key inode.
304 *
305 * If op->bypass is true, instead of inserting the data it invalidates the
306 * region of the cache represented by op->bio and op->inode.
307 */
308void bch_data_insert(struct closure *cl)
309{
310 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
311
312 trace_bcache_write(op->c, op->inode, op->bio,
313 op->writeback, op->bypass);
314
315 bch_keylist_init(&op->insert_keys);
316 bio_get(op->bio);
317 bch_data_insert_start(cl);
318}
319
320/*
321 * Congested? Return 0 (not congested) or the limit (in sectors)
322 * beyond which we should bypass the cache due to congestion.
323 */
324unsigned int bch_get_congested(const struct cache_set *c)
325{
326 int i;
327
328 if (!c->congested_read_threshold_us &&
329 !c->congested_write_threshold_us)
330 return 0;
331
332 i = (local_clock_us() - c->congested_last_us) / 1024;
333 if (i < 0)
334 return 0;
335
336 i += atomic_read(&c->congested);
337 if (i >= 0)
338 return 0;
339
340 i += CONGESTED_MAX;
341
342 if (i > 0)
343 i = fract_exp_two(i, 6);
344
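	/* subtract a small random fuzz (popcount of a random u32, 0..32) */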
345 i -= hweight32(get_random_u32());
346
347 return i > 0 ? i : 1;
348}
349
350static void add_sequential(struct task_struct *t)
351{
352 ewma_add(t->sequential_io_avg,
353 t->sequential_io, 8, 0);
354
355 t->sequential_io = 0;
356}
357
358static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
359{
360 return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
361}
362
363static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
364{
365 struct cache_set *c = dc->disk.c;
366 unsigned int mode = cache_mode(dc);
367 unsigned int sectors, congested;
368 struct task_struct *task = current;
369 struct io *i;
370
371 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
372 c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
373 (bio_op(bio) == REQ_OP_DISCARD))
374 goto skip;
375
376 if (mode == CACHE_MODE_NONE ||
377 (mode == CACHE_MODE_WRITEAROUND &&
378 op_is_write(bio_op(bio))))
379 goto skip;
380
381 /*
382	 * If the bio is for read-ahead or background IO, whether or not to
383	 * bypass it depends on the following:
384	 * - If the IO is for metadata, always cache it, no bypass
385	 * - If the IO is not metadata, check dc->cache_readahead_policy:
386	 *      BCH_CACHE_READA_ALL: cache it, do not bypass
387	 *      BCH_CACHE_READA_META_ONLY: do not cache it, bypass
388	 * That is, read-ahead requests for metadata always get cached
389	 * (eg, for gfs2 or xfs).
390 */
391 if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
392 if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
393 (dc->cache_readahead_policy != BCH_CACHE_READA_ALL))
394 goto skip;
395 }
396
397 if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
398 bio_sectors(bio) & (c->sb.block_size - 1)) {
399 pr_debug("skipping unaligned io\n");
400 goto skip;
401 }
402
403 if (bypass_torture_test(dc)) {
404 if ((get_random_int() & 3) == 3)
405 goto skip;
406 else
407 goto rescale;
408 }
409
410 congested = bch_get_congested(c);
411 if (!congested && !dc->sequential_cutoff)
412 goto rescale;
413
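	/*
	 * Track sequential I/O per task: find the most recent I/O that ended
	 * at this bio's starting sector and extend its running length, or
	 * recycle the least recently used entry if there is no match.
	 */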
414 spin_lock(&dc->io_lock);
415
416 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
417 if (i->last == bio->bi_iter.bi_sector &&
418 time_before(jiffies, i->jiffies))
419 goto found;
420
421 i = list_first_entry(&dc->io_lru, struct io, lru);
422
423 add_sequential(task);
424 i->sequential = 0;
425found:
426 if (i->sequential + bio->bi_iter.bi_size > i->sequential)
427 i->sequential += bio->bi_iter.bi_size;
428
429 i->last = bio_end_sector(bio);
430 i->jiffies = jiffies + msecs_to_jiffies(5000);
431 task->sequential_io = i->sequential;
432
433 hlist_del(&i->hash);
434 hlist_add_head(&i->hash, iohash(dc, i->last));
435 list_move_tail(&i->lru, &dc->io_lru);
436
437 spin_unlock(&dc->io_lock);
438
439 sectors = max(task->sequential_io,
440 task->sequential_io_avg) >> 9;
441
442 if (dc->sequential_cutoff &&
443 sectors >= dc->sequential_cutoff >> 9) {
444 trace_bcache_bypass_sequential(bio);
445 goto skip;
446 }
447
448 if (congested && sectors >= congested) {
449 trace_bcache_bypass_congested(bio);
450 goto skip;
451 }
452
453rescale:
454 bch_rescale_priorities(c, bio_sectors(bio));
455 return false;
456skip:
457 bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
458 return true;
459}
460
461/* Cache lookup */
462
463struct search {
464 /* Stack frame for bio_complete */
465 struct closure cl;
466
467 struct bbio bio;
468 struct bio *orig_bio;
469 struct bio *cache_miss;
470 struct bcache_device *d;
471
472 unsigned int insert_bio_sectors;
473 unsigned int recoverable:1;
474 unsigned int write:1;
475 unsigned int read_dirty_data:1;
476 unsigned int cache_missed:1;
477
478 unsigned long start_time;
479
480 struct btree_op op;
481 struct data_insert_op iop;
482};
483
484static void bch_cache_read_endio(struct bio *bio)
485{
486 struct bbio *b = container_of(bio, struct bbio, bio);
487 struct closure *cl = bio->bi_private;
488 struct search *s = container_of(cl, struct search, cl);
489
490 /*
491 * If the bucket was reused while our bio was in flight, we might have
492 * read the wrong data. Set s->error but not error so it doesn't get
493 * counted against the cache device, but we'll still reread the data
494 * from the backing device.
495 */
496
497 if (bio->bi_status)
498 s->iop.status = bio->bi_status;
499 else if (!KEY_DIRTY(&b->key) &&
500 ptr_stale(s->iop.c, &b->key, 0)) {
501 atomic_long_inc(&s->iop.c->cache_read_races);
502 s->iop.status = BLK_STS_IOERR;
503 }
504
505 bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
506}
507
508/*
509 * Read from a single key, handling the initial cache miss if the key starts in
510 * the middle of the bio
511 */
512static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
513{
514 struct search *s = container_of(op, struct search, op);
515 struct bio *n, *bio = &s->bio.bio;
516 struct bkey *bio_key;
517 unsigned int ptr;
518
519 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
520 return MAP_CONTINUE;
521
522 if (KEY_INODE(k) != s->iop.inode ||
523 KEY_START(k) > bio->bi_iter.bi_sector) {
524 unsigned int bio_sectors = bio_sectors(bio);
525 unsigned int sectors = KEY_INODE(k) == s->iop.inode
526 ? min_t(uint64_t, INT_MAX,
527 KEY_START(k) - bio->bi_iter.bi_sector)
528 : INT_MAX;
529 int ret = s->d->cache_miss(b, s, bio, sectors);
530
531 if (ret != MAP_CONTINUE)
532 return ret;
533
534 /* if this was a complete miss we shouldn't get here */
535 BUG_ON(bio_sectors <= sectors);
536 }
537
538 if (!KEY_SIZE(k))
539 return MAP_CONTINUE;
540
541 /* XXX: figure out best pointer - for multiple cache devices */
542 ptr = 0;
543
544 PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
545
546 if (KEY_DIRTY(k))
547 s->read_dirty_data = true;
548
549 n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
550 KEY_OFFSET(k) - bio->bi_iter.bi_sector),
551 GFP_NOIO, &s->d->bio_split);
552
553 bio_key = &container_of(n, struct bbio, bio)->key;
554 bch_bkey_copy_single_ptr(bio_key, k, ptr);
555
556 bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
557 bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
558
559 n->bi_end_io = bch_cache_read_endio;
560 n->bi_private = &s->cl;
561
562 /*
563 * The bucket we're reading from might be reused while our bio
564 * is in flight, and we could then end up reading the wrong
565 * data.
566 *
567 * We guard against this by checking (in cache_read_endio()) if
568 * the pointer is stale again; if so, we treat it as an error
569 * and reread from the backing device (but we don't pass that
570 * error up anywhere).
571 */
572
573 __bch_submit_bbio(n, b->c);
574 return n == bio ? MAP_DONE : MAP_CONTINUE;
575}
576
577static void cache_lookup(struct closure *cl)
578{
579 struct search *s = container_of(cl, struct search, iop.cl);
580 struct bio *bio = &s->bio.bio;
581 struct cached_dev *dc;
582 int ret;
583
584 bch_btree_op_init(&s->op, -1);
585
586 ret = bch_btree_map_keys(&s->op, s->iop.c,
587 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
588 cache_lookup_fn, MAP_END_KEY);
589 if (ret == -EAGAIN) {
590 continue_at(cl, cache_lookup, bcache_wq);
591 return;
592 }
593
594 /*
595	 * We might hit an error while searching the btree. If that happens we
596	 * get a negative ret; in that case we must not recover data from the
597	 * backing device (when the cache device is dirty), because we don't
598	 * know whether all the bkeys the read request covers are clean.
599	 *
600	 * When that happens, s->iop.status still has its initial value from
601	 * before we submitted s->bio.bio.
602 */
603 if (ret < 0) {
604 BUG_ON(ret == -EINTR);
605 if (s->d && s->d->c &&
606 !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
607 dc = container_of(s->d, struct cached_dev, disk);
608 if (dc && atomic_read(&dc->has_dirty))
609 s->recoverable = false;
610 }
611 if (!s->iop.status)
612 s->iop.status = BLK_STS_IOERR;
613 }
614
615 closure_return(cl);
616}
617
618/* Common code for the make_request functions */
619
620static void request_endio(struct bio *bio)
621{
622 struct closure *cl = bio->bi_private;
623
624 if (bio->bi_status) {
625 struct search *s = container_of(cl, struct search, cl);
626
627 s->iop.status = bio->bi_status;
628 /* Only cache read errors are recoverable */
629 s->recoverable = false;
630 }
631
632 bio_put(bio);
633 closure_put(cl);
634}
635
636static void backing_request_endio(struct bio *bio)
637{
638 struct closure *cl = bio->bi_private;
639
640 if (bio->bi_status) {
641 struct search *s = container_of(cl, struct search, cl);
642 struct cached_dev *dc = container_of(s->d,
643 struct cached_dev, disk);
644 /*
645		 * If a bio has REQ_PREFLUSH for writeback mode, it is
646		 * specially assembled in cached_dev_write() for a non-zero
647		 * write request which has REQ_PREFLUSH. We don't set
648		 * s->iop.status for this failure; the status will be decided
649		 * by the result of the bch_data_insert() operation.
650 */
651 if (unlikely(s->iop.writeback &&
652 bio->bi_opf & REQ_PREFLUSH)) {
653 pr_err("Can't flush %s: returned bi_status %i\n",
654 dc->backing_dev_name, bio->bi_status);
655 } else {
656 /* set to orig_bio->bi_status in bio_complete() */
657 s->iop.status = bio->bi_status;
658 }
659 s->recoverable = false;
660 /* should count I/O error for backing device here */
661 bch_count_backing_io_errors(dc, bio);
662 }
663
664 bio_put(bio);
665 closure_put(cl);
666}
667
668static void bio_complete(struct search *s)
669{
670 if (s->orig_bio) {
671 /* Count on bcache device */
672 disk_end_io_acct(s->d->disk, bio_op(s->orig_bio), s->start_time);
673
674 trace_bcache_request_end(s->d, s->orig_bio);
675 s->orig_bio->bi_status = s->iop.status;
676 bio_endio(s->orig_bio);
677 s->orig_bio = NULL;
678 }
679}
680
681static void do_bio_hook(struct search *s,
682 struct bio *orig_bio,
683 bio_end_io_t *end_io_fn)
684{
685 struct bio *bio = &s->bio.bio;
686
687 bio_init(bio, NULL, 0);
688 __bio_clone_fast(bio, orig_bio);
689 /*
690 * bi_end_io can be set separately somewhere else, e.g. the
691 * variants in,
692 * - cache_bio->bi_end_io from cached_dev_cache_miss()
693 * - n->bi_end_io from cache_lookup_fn()
694 */
695 bio->bi_end_io = end_io_fn;
696 bio->bi_private = &s->cl;
697
698 bio_cnt_set(bio, 3);
699}
700
701static void search_free(struct closure *cl)
702{
703 struct search *s = container_of(cl, struct search, cl);
704
705 atomic_dec(&s->iop.c->search_inflight);
706
707 if (s->iop.bio)
708 bio_put(s->iop.bio);
709
710 bio_complete(s);
711 closure_debug_destroy(cl);
712 mempool_free(s, &s->iop.c->search);
713}
714
715static inline struct search *search_alloc(struct bio *bio,
716 struct bcache_device *d)
717{
718 struct search *s;
719
720 s = mempool_alloc(&d->c->search, GFP_NOIO);
721
722 closure_init(&s->cl, NULL);
723 do_bio_hook(s, bio, request_endio);
724 atomic_inc(&d->c->search_inflight);
725
726 s->orig_bio = bio;
727 s->cache_miss = NULL;
728 s->cache_missed = 0;
729 s->d = d;
730 s->recoverable = 1;
731 s->write = op_is_write(bio_op(bio));
732 s->read_dirty_data = 0;
733 /* Count on the bcache device */
734 s->start_time = disk_start_io_acct(d->disk, bio_sectors(bio), bio_op(bio));
735 s->iop.c = d->c;
736 s->iop.bio = NULL;
737 s->iop.inode = d->id;
738 s->iop.write_point = hash_long((unsigned long) current, 16);
739 s->iop.write_prio = 0;
740 s->iop.status = 0;
741 s->iop.flags = 0;
742 s->iop.flush_journal = op_is_flush(bio->bi_opf);
743 s->iop.wq = bcache_wq;
744
745 return s;
746}
747
748/* Cached devices */
749
750static void cached_dev_bio_complete(struct closure *cl)
751{
752 struct search *s = container_of(cl, struct search, cl);
753 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
754
755 cached_dev_put(dc);
756 search_free(cl);
757}
758
759/* Process reads */
760
761static void cached_dev_read_error_done(struct closure *cl)
762{
763 struct search *s = container_of(cl, struct search, cl);
764
765 if (s->iop.replace_collision)
766 bch_mark_cache_miss_collision(s->iop.c, s->d);
767
768 if (s->iop.bio)
769 bio_free_pages(s->iop.bio);
770
771 cached_dev_bio_complete(cl);
772}
773
774static void cached_dev_read_error(struct closure *cl)
775{
776 struct search *s = container_of(cl, struct search, cl);
777 struct bio *bio = &s->bio.bio;
778
779 /*
780 * If read request hit dirty data (s->read_dirty_data is true),
781 * then recovery a failed read request from cached device may
782 * get a stale data back. So read failure recovery is only
783 * permitted when read request hit clean data in cache device,
784 * or when cache read race happened.
785 */
786 if (s->recoverable && !s->read_dirty_data) {
787 /* Retry from the backing device: */
788 trace_bcache_read_retry(s->orig_bio);
789
790 s->iop.status = 0;
791 do_bio_hook(s, s->orig_bio, backing_request_endio);
792
793 /* XXX: invalidate cache */
794
795 /* I/O request sent to backing device */
796 closure_bio_submit(s->iop.c, bio, cl);
797 }
798
799 continue_at(cl, cached_dev_read_error_done, NULL);
800}
801
802static void cached_dev_cache_miss_done(struct closure *cl)
803{
804 struct search *s = container_of(cl, struct search, cl);
805 struct bcache_device *d = s->d;
806
807 if (s->iop.replace_collision)
808 bch_mark_cache_miss_collision(s->iop.c, s->d);
809
810 if (s->iop.bio)
811 bio_free_pages(s->iop.bio);
812
813 cached_dev_bio_complete(cl);
814 closure_put(&d->cl);
815}
816
817static void cached_dev_read_done(struct closure *cl)
818{
819 struct search *s = container_of(cl, struct search, cl);
820 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
821
822 /*
823 * We had a cache miss; cache_bio now contains data ready to be inserted
824 * into the cache.
825 *
826 * First, we copy the data we just read from cache_bio's bounce buffers
827 * to the buffers the original bio pointed to:
828 */
829
830 if (s->iop.bio) {
831 bio_reset(s->iop.bio);
832 s->iop.bio->bi_iter.bi_sector =
833 s->cache_miss->bi_iter.bi_sector;
834 bio_copy_dev(s->iop.bio, s->cache_miss);
835 s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
836 bch_bio_map(s->iop.bio, NULL);
837
838 bio_copy_data(s->cache_miss, s->iop.bio);
839
840 bio_put(s->cache_miss);
841 s->cache_miss = NULL;
842 }
843
844 if (verify(dc) && s->recoverable && !s->read_dirty_data)
845 bch_data_verify(dc, s->orig_bio);
846
847 closure_get(&dc->disk.cl);
848 bio_complete(s);
849
850 if (s->iop.bio &&
851 !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
852 BUG_ON(!s->iop.replace);
853 closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
854 }
855
856 continue_at(cl, cached_dev_cache_miss_done, NULL);
857}
858
859static void cached_dev_read_done_bh(struct closure *cl)
860{
861 struct search *s = container_of(cl, struct search, cl);
862 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
863
864 bch_mark_cache_accounting(s->iop.c, s->d,
865 !s->cache_missed, s->iop.bypass);
866 trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);
867
868 if (s->iop.status)
869 continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
870 else if (s->iop.bio || verify(dc))
871 continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
872 else
873 continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
874}
875
876static int cached_dev_cache_miss(struct btree *b, struct search *s,
877 struct bio *bio, unsigned int sectors)
878{
879 int ret = MAP_CONTINUE;
880 unsigned int reada = 0;
881 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
882 struct bio *miss, *cache_bio;
883
884 s->cache_missed = 1;
885
886 if (s->cache_miss || s->iop.bypass) {
887 miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
888 ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
889 goto out_submit;
890 }
891
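	/*
	 * Only read ahead past the miss for ordinary reads: not for bios
	 * already marked as read-ahead or metadata, and not when the cache
	 * is nearly full.
	 */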
892 if (!(bio->bi_opf & REQ_RAHEAD) &&
893 !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
894 s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
895 reada = min_t(sector_t, dc->readahead >> 9,
896 get_capacity(bio->bi_disk) - bio_end_sector(bio));
897
898 s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
899
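	/*
	 * Stake out the region being read into the cache with a check key;
	 * if it is overwritten before our insert completes, the eventual
	 * insert fails with a replace collision rather than clobbering the
	 * newer data.
	 */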
900 s->iop.replace_key = KEY(s->iop.inode,
901 bio->bi_iter.bi_sector + s->insert_bio_sectors,
902 s->insert_bio_sectors);
903
904 ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
905 if (ret)
906 return ret;
907
908 s->iop.replace = true;
909
910 miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
911
912 /* btree_search_recurse()'s btree iterator is no good anymore */
913 ret = miss == bio ? MAP_DONE : -EINTR;
914
915 cache_bio = bio_alloc_bioset(GFP_NOWAIT,
916 DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
917 &dc->disk.bio_split);
918 if (!cache_bio)
919 goto out_submit;
920
921 cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
922 bio_copy_dev(cache_bio, miss);
923 cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
924
925 cache_bio->bi_end_io = backing_request_endio;
926 cache_bio->bi_private = &s->cl;
927
928 bch_bio_map(cache_bio, NULL);
929 if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
930 goto out_put;
931
932 if (reada)
933 bch_mark_cache_readahead(s->iop.c, s->d);
934
935 s->cache_miss = miss;
936 s->iop.bio = cache_bio;
937 bio_get(cache_bio);
938 /* I/O request sent to backing device */
939 closure_bio_submit(s->iop.c, cache_bio, &s->cl);
940
941 return ret;
942out_put:
943 bio_put(cache_bio);
944out_submit:
945 miss->bi_end_io = backing_request_endio;
946 miss->bi_private = &s->cl;
947 /* I/O request sent to backing device */
948 closure_bio_submit(s->iop.c, miss, &s->cl);
949 return ret;
950}
951
952static void cached_dev_read(struct cached_dev *dc, struct search *s)
953{
954 struct closure *cl = &s->cl;
955
956 closure_call(&s->iop.cl, cache_lookup, NULL, cl);
957 continue_at(cl, cached_dev_read_done_bh, NULL);
958}
959
960/* Process writes */
961
962static void cached_dev_write_complete(struct closure *cl)
963{
964 struct search *s = container_of(cl, struct search, cl);
965 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
966
967 up_read_non_owner(&dc->writeback_lock);
968 cached_dev_bio_complete(cl);
969}
970
971static void cached_dev_write(struct cached_dev *dc, struct search *s)
972{
973 struct closure *cl = &s->cl;
974 struct bio *bio = &s->bio.bio;
975 struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
976 struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
977
978 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
979
980 down_read_non_owner(&dc->writeback_lock);
981 if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
982 /*
983 * We overlap with some dirty data undergoing background
984 * writeback, force this write to writeback
985 */
986 s->iop.bypass = false;
987 s->iop.writeback = true;
988 }
989
990 /*
991 * Discards aren't _required_ to do anything, so skipping if
992 * check_overlapping returned true is ok
993 *
994 * But check_overlapping drops dirty keys for which io hasn't started,
995 * so we still want to call it.
996 */
997 if (bio_op(bio) == REQ_OP_DISCARD)
998 s->iop.bypass = true;
999
1000 if (should_writeback(dc, s->orig_bio,
1001 cache_mode(dc),
1002 s->iop.bypass)) {
1003 s->iop.bypass = false;
1004 s->iop.writeback = true;
1005 }
1006
1007 if (s->iop.bypass) {
1008 s->iop.bio = s->orig_bio;
1009 bio_get(s->iop.bio);
1010
1011 if (bio_op(bio) == REQ_OP_DISCARD &&
1012 !blk_queue_discard(bdev_get_queue(dc->bdev)))
1013 goto insert_data;
1014
1015 /* I/O request sent to backing device */
1016 bio->bi_end_io = backing_request_endio;
1017 closure_bio_submit(s->iop.c, bio, cl);
1018
1019 } else if (s->iop.writeback) {
1020 bch_writeback_add(dc);
1021 s->iop.bio = bio;
1022
1023 if (bio->bi_opf & REQ_PREFLUSH) {
1024 /*
1025 * Also need to send a flush to the backing
1026 * device.
1027 */
1028 struct bio *flush;
1029
1030 flush = bio_alloc_bioset(GFP_NOIO, 0,
1031 &dc->disk.bio_split);
1032 if (!flush) {
1033 s->iop.status = BLK_STS_RESOURCE;
1034 goto insert_data;
1035 }
1036 bio_copy_dev(flush, bio);
1037 flush->bi_end_io = backing_request_endio;
1038 flush->bi_private = cl;
1039 flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
1040 /* I/O request sent to backing device */
1041 closure_bio_submit(s->iop.c, flush, cl);
1042 }
1043 } else {
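		/*
		 * Writethrough: clone the bio so the cache insert and the
		 * write to the backing device can proceed in parallel.
		 */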
1044 s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
1045 /* I/O request sent to backing device */
1046 bio->bi_end_io = backing_request_endio;
1047 closure_bio_submit(s->iop.c, bio, cl);
1048 }
1049
1050insert_data:
1051 closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1052 continue_at(cl, cached_dev_write_complete, NULL);
1053}
1054
1055static void cached_dev_nodata(struct closure *cl)
1056{
1057 struct search *s = container_of(cl, struct search, cl);
1058 struct bio *bio = &s->bio.bio;
1059
1060 if (s->iop.flush_journal)
1061 bch_journal_meta(s->iop.c, cl);
1062
1063 /* If it's a flush, we send the flush to the backing device too */
1064 bio->bi_end_io = backing_request_endio;
1065 closure_bio_submit(s->iop.c, bio, cl);
1066
1067 continue_at(cl, cached_dev_bio_complete, NULL);
1068}
1069
1070struct detached_dev_io_private {
1071 struct bcache_device *d;
1072 unsigned long start_time;
1073 bio_end_io_t *bi_end_io;
1074 void *bi_private;
1075};
1076
1077static void detached_dev_end_io(struct bio *bio)
1078{
1079 struct detached_dev_io_private *ddip;
1080
1081 ddip = bio->bi_private;
1082 bio->bi_end_io = ddip->bi_end_io;
1083 bio->bi_private = ddip->bi_private;
1084
1085 /* Count on the bcache device */
1086 disk_end_io_acct(ddip->d->disk, bio_op(bio), ddip->start_time);
1087
1088 if (bio->bi_status) {
1089 struct cached_dev *dc = container_of(ddip->d,
1090 struct cached_dev, disk);
1091 /* should count I/O error for backing device here */
1092 bch_count_backing_io_errors(dc, bio);
1093 }
1094
1095 kfree(ddip);
1096 bio->bi_end_io(bio);
1097}
1098
1099static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
1100{
1101 struct detached_dev_io_private *ddip;
1102 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1103
1104 /*
1105	 * No need to call closure_get(&dc->disk.cl) here, because the upper
1106	 * layer has already opened the bcache device, which already did the
1107	 * closure_get(&dc->disk.cl).
1108 */
1109 ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
1110 ddip->d = d;
1111 /* Count on the bcache device */
1112 ddip->start_time = disk_start_io_acct(d->disk, bio_sectors(bio), bio_op(bio));
1113 ddip->bi_end_io = bio->bi_end_io;
1114 ddip->bi_private = bio->bi_private;
1115 bio->bi_end_io = detached_dev_end_io;
1116 bio->bi_private = ddip;
1117
1118 if ((bio_op(bio) == REQ_OP_DISCARD) &&
1119 !blk_queue_discard(bdev_get_queue(dc->bdev)))
1120 bio->bi_end_io(bio);
1121 else
1122 submit_bio_noacct(bio);
1123}
1124
1125static void quit_max_writeback_rate(struct cache_set *c,
1126 struct cached_dev *this_dc)
1127{
1128 int i;
1129 struct bcache_device *d;
1130 struct cached_dev *dc;
1131
1132 /*
1133	 * The mutex bch_register_lock may be contended by other parallel
1134	 * requesters, or by attach/detach operations on other backing devices.
1135	 * Waiting for the mutex lock may increase I/O request latency by
1136	 * seconds or more. To avoid this, if mutex_trylock() fails, only the
1137	 * writeback rate of the current cached device is set to 1, and
1138	 * __update_writeback_rate() will decide the writeback rate of the
1139	 * other cached devices (remember that c->idle_counter is already 0).
1140 */
1141 if (mutex_trylock(&bch_register_lock)) {
1142 for (i = 0; i < c->devices_max_used; i++) {
1143 if (!c->devices[i])
1144 continue;
1145
1146 if (UUID_FLASH_ONLY(&c->uuids[i]))
1147 continue;
1148
1149 d = c->devices[i];
1150 dc = container_of(d, struct cached_dev, disk);
1151 /*
1152 * set writeback rate to default minimum value,
1153			 * then let update_writeback_rate() decide the
1154 * upcoming rate.
1155 */
1156 atomic_long_set(&dc->writeback_rate.rate, 1);
1157 }
1158 mutex_unlock(&bch_register_lock);
1159 } else
1160 atomic_long_set(&this_dc->writeback_rate.rate, 1);
1161}
1162
1163/* Cached devices - read & write stuff */
1164
1165blk_qc_t cached_dev_submit_bio(struct bio *bio)
1166{
1167 struct search *s;
1168 struct bcache_device *d = bio->bi_disk->private_data;
1169 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1170 int rw = bio_data_dir(bio);
1171
1172 if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
1173 dc->io_disable)) {
1174 bio->bi_status = BLK_STS_IOERR;
1175 bio_endio(bio);
1176 return BLK_QC_T_NONE;
1177 }
1178
1179 if (likely(d->c)) {
1180 if (atomic_read(&d->c->idle_counter))
1181 atomic_set(&d->c->idle_counter, 0);
1182 /*
1183		 * If the cache set's at_max_writeback_rate flag is set and new
1184		 * I/O arrives, take all cached devices attached to this cache
1185		 * set out of maximum writeback rate, and clear
1186		 * at_max_writeback_rate.
1187 */
1188 if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
1189 atomic_set(&d->c->at_max_writeback_rate, 0);
1190 quit_max_writeback_rate(d->c, dc);
1191 }
1192 }
1193
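	/* redirect the bio to the backing device, past its bcache superblock */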
1194 bio_set_dev(bio, dc->bdev);
1195 bio->bi_iter.bi_sector += dc->sb.data_offset;
1196
1197 if (cached_dev_get(dc)) {
1198 s = search_alloc(bio, d);
1199 trace_bcache_request_start(s->d, bio);
1200
1201 if (!bio->bi_iter.bi_size) {
1202 /*
1203 * can't call bch_journal_meta from under
1204 * submit_bio_noacct
1205 */
1206 continue_at_nobarrier(&s->cl,
1207 cached_dev_nodata,
1208 bcache_wq);
1209 } else {
1210 s->iop.bypass = check_should_bypass(dc, bio);
1211
1212 if (rw)
1213 cached_dev_write(dc, s);
1214 else
1215 cached_dev_read(dc, s);
1216 }
1217 } else
1218 /* I/O request sent to backing device */
1219 detached_dev_do_request(d, bio);
1220
1221 return BLK_QC_T_NONE;
1222}
1223
1224static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
1225 unsigned int cmd, unsigned long arg)
1226{
1227 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1228
1229 if (dc->io_disable)
1230 return -EIO;
1231
1232 return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
1233}
1234
1235void bch_cached_dev_request_init(struct cached_dev *dc)
1236{
1237 dc->disk.cache_miss = cached_dev_cache_miss;
1238 dc->disk.ioctl = cached_dev_ioctl;
1239}
1240
1241/* Flash backed devices */
1242
1243static int flash_dev_cache_miss(struct btree *b, struct search *s,
1244 struct bio *bio, unsigned int sectors)
1245{
1246 unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
1247
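	/* flash-only volumes have no backing device; a miss just reads zeroes */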
1248 swap(bio->bi_iter.bi_size, bytes);
1249 zero_fill_bio(bio);
1250 swap(bio->bi_iter.bi_size, bytes);
1251
1252 bio_advance(bio, bytes);
1253
1254 if (!bio->bi_iter.bi_size)
1255 return MAP_DONE;
1256
1257 return MAP_CONTINUE;
1258}
1259
1260static void flash_dev_nodata(struct closure *cl)
1261{
1262 struct search *s = container_of(cl, struct search, cl);
1263
1264 if (s->iop.flush_journal)
1265 bch_journal_meta(s->iop.c, cl);
1266
1267 continue_at(cl, search_free, NULL);
1268}
1269
1270blk_qc_t flash_dev_submit_bio(struct bio *bio)
1271{
1272 struct search *s;
1273 struct closure *cl;
1274 struct bcache_device *d = bio->bi_disk->private_data;
1275
1276 if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
1277 bio->bi_status = BLK_STS_IOERR;
1278 bio_endio(bio);
1279 return BLK_QC_T_NONE;
1280 }
1281
1282 s = search_alloc(bio, d);
1283 cl = &s->cl;
1284 bio = &s->bio.bio;
1285
1286 trace_bcache_request_start(s->d, bio);
1287
1288 if (!bio->bi_iter.bi_size) {
1289 /*
1290 * can't call bch_journal_meta from under submit_bio_noacct
1291 */
1292 continue_at_nobarrier(&s->cl,
1293 flash_dev_nodata,
1294 bcache_wq);
1295 return BLK_QC_T_NONE;
1296 } else if (bio_data_dir(bio)) {
1297 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
1298 &KEY(d->id, bio->bi_iter.bi_sector, 0),
1299 &KEY(d->id, bio_end_sector(bio), 0));
1300
1301 s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
1302 s->iop.writeback = true;
1303 s->iop.bio = bio;
1304
1305 closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1306 } else {
1307 closure_call(&s->iop.cl, cache_lookup, NULL, cl);
1308 }
1309
1310 continue_at(cl, search_free, NULL);
1311 return BLK_QC_T_NONE;
1312}
1313
1314static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
1315 unsigned int cmd, unsigned long arg)
1316{
1317 return -ENOTTY;
1318}
1319
1320void bch_flash_dev_request_init(struct bcache_device *d)
1321{
1322 d->cache_miss = flash_dev_cache_miss;
1323 d->ioctl = flash_dev_ioctl;
1324}
1325
1326void bch_request_exit(void)
1327{
1328 kmem_cache_destroy(bch_search_cache);
1329}
1330
1331int __init bch_request_init(void)
1332{
1333 bch_search_cache = KMEM_CACHE(search, 0);
1334 if (!bch_search_cache)
1335 return -ENOMEM;
1336
1337 return 0;
1338}
1/*
2 * Main bcache entry point - handle a read or a write request and decide what to
3 * do with it; the make_request functions are called by the block layer.
4 *
5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
6 * Copyright 2012 Google, Inc.
7 */
8
9#include "bcache.h"
10#include "btree.h"
11#include "debug.h"
12#include "request.h"
13#include "writeback.h"
14
15#include <linux/module.h>
16#include <linux/hash.h>
17#include <linux/random.h>
18
19#include <trace/events/bcache.h>
20
21#define CUTOFF_CACHE_ADD 95
22#define CUTOFF_CACHE_READA 90
23
24struct kmem_cache *bch_search_cache;
25
26static void bch_data_insert_start(struct closure *);
27
28static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
29{
30 return BDEV_CACHE_MODE(&dc->sb);
31}
32
33static bool verify(struct cached_dev *dc, struct bio *bio)
34{
35 return dc->verify;
36}
37
38static void bio_csum(struct bio *bio, struct bkey *k)
39{
40 struct bio_vec bv;
41 struct bvec_iter iter;
42 uint64_t csum = 0;
43
44 bio_for_each_segment(bv, bio, iter) {
45 void *d = kmap(bv.bv_page) + bv.bv_offset;
46 csum = bch_crc64_update(csum, d, bv.bv_len);
47 kunmap(bv.bv_page);
48 }
49
50 k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
51}
52
53/* Insert data into cache */
54
55static void bch_data_insert_keys(struct closure *cl)
56{
57 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
58 atomic_t *journal_ref = NULL;
59 struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
60 int ret;
61
62 /*
63 * If we're looping, might already be waiting on
64 * another journal write - can't wait on more than one journal write at
65 * a time
66 *
67 * XXX: this looks wrong
68 */
69#if 0
70 while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
71 closure_sync(&s->cl);
72#endif
73
74 if (!op->replace)
75 journal_ref = bch_journal(op->c, &op->insert_keys,
76 op->flush_journal ? cl : NULL);
77
78 ret = bch_btree_insert(op->c, &op->insert_keys,
79 journal_ref, replace_key);
80 if (ret == -ESRCH) {
81 op->replace_collision = true;
82 } else if (ret) {
83 op->error = -ENOMEM;
84 op->insert_data_done = true;
85 }
86
87 if (journal_ref)
88 atomic_dec_bug(journal_ref);
89
90 if (!op->insert_data_done)
91 continue_at(cl, bch_data_insert_start, op->wq);
92
93 bch_keylist_free(&op->insert_keys);
94 closure_return(cl);
95}
96
97static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
98 struct cache_set *c)
99{
100 size_t oldsize = bch_keylist_nkeys(l);
101 size_t newsize = oldsize + u64s;
102
103 /*
104 * The journalling code doesn't handle the case where the keys to insert
105 * is bigger than an empty write: If we just return -ENOMEM here,
106 * bio_insert() and bio_invalidate() will insert the keys created so far
107 * and finish the rest when the keylist is empty.
108 */
109 if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
110 return -ENOMEM;
111
112 return __bch_keylist_realloc(l, u64s);
113}
114
115static void bch_data_invalidate(struct closure *cl)
116{
117 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
118 struct bio *bio = op->bio;
119
120 pr_debug("invalidating %i sectors from %llu",
121 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
122
123 while (bio_sectors(bio)) {
124 unsigned sectors = min(bio_sectors(bio),
125 1U << (KEY_SIZE_BITS - 1));
126
127 if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
128 goto out;
129
130 bio->bi_iter.bi_sector += sectors;
131 bio->bi_iter.bi_size -= sectors << 9;
132
133 bch_keylist_add(&op->insert_keys,
134 &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
135 }
136
137 op->insert_data_done = true;
138 bio_put(bio);
139out:
140 continue_at(cl, bch_data_insert_keys, op->wq);
141}
142
143static void bch_data_insert_error(struct closure *cl)
144{
145 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
146
147 /*
148 * Our data write just errored, which means we've got a bunch of keys to
149 * insert that point to data that wasn't succesfully written.
150 *
151 * We don't have to insert those keys but we still have to invalidate
152 * that region of the cache - so, if we just strip off all the pointers
153 * from the keys we'll accomplish just that.
154 */
155
156 struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
157
158 while (src != op->insert_keys.top) {
159 struct bkey *n = bkey_next(src);
160
161 SET_KEY_PTRS(src, 0);
162 memmove(dst, src, bkey_bytes(src));
163
164 dst = bkey_next(dst);
165 src = n;
166 }
167
168 op->insert_keys.top = dst;
169
170 bch_data_insert_keys(cl);
171}
172
173static void bch_data_insert_endio(struct bio *bio, int error)
174{
175 struct closure *cl = bio->bi_private;
176 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
177
178 if (error) {
179 /* TODO: We could try to recover from this. */
180 if (op->writeback)
181 op->error = error;
182 else if (!op->replace)
183 set_closure_fn(cl, bch_data_insert_error, op->wq);
184 else
185 set_closure_fn(cl, NULL, NULL);
186 }
187
188 bch_bbio_endio(op->c, bio, error, "writing data to cache");
189}
190
191static void bch_data_insert_start(struct closure *cl)
192{
193 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
194 struct bio *bio = op->bio, *n;
195
196 if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
197 set_gc_sectors(op->c);
198 wake_up_gc(op->c);
199 }
200
201 if (op->bypass)
202 return bch_data_invalidate(cl);
203
204 /*
205 * Journal writes are marked REQ_FLUSH; if the original write was a
206 * flush, it'll wait on the journal write.
207 */
208 bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
209
210 do {
211 unsigned i;
212 struct bkey *k;
213 struct bio_set *split = op->c->bio_split;
214
215 /* 1 for the device pointer and 1 for the chksum */
216 if (bch_keylist_realloc(&op->insert_keys,
217 3 + (op->csum ? 1 : 0),
218 op->c))
219 continue_at(cl, bch_data_insert_keys, op->wq);
220
221 k = op->insert_keys.top;
222 bkey_init(k);
223 SET_KEY_INODE(k, op->inode);
224 SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
225
226 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
227 op->write_point, op->write_prio,
228 op->writeback))
229 goto err;
230
231 n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
232
233 n->bi_end_io = bch_data_insert_endio;
234 n->bi_private = cl;
235
236 if (op->writeback) {
237 SET_KEY_DIRTY(k, true);
238
239 for (i = 0; i < KEY_PTRS(k); i++)
240 SET_GC_MARK(PTR_BUCKET(op->c, k, i),
241 GC_MARK_DIRTY);
242 }
243
244 SET_KEY_CSUM(k, op->csum);
245 if (KEY_CSUM(k))
246 bio_csum(n, k);
247
248 trace_bcache_cache_insert(k);
249 bch_keylist_push(&op->insert_keys);
250
251 n->bi_rw |= REQ_WRITE;
252 bch_submit_bbio(n, op->c, k, 0);
253 } while (n != bio);
254
255 op->insert_data_done = true;
256 continue_at(cl, bch_data_insert_keys, op->wq);
257err:
258 /* bch_alloc_sectors() blocks if s->writeback = true */
259 BUG_ON(op->writeback);
260
261 /*
262 * But if it's not a writeback write we'd rather just bail out if
263 * there aren't any buckets ready to write to - it might take awhile and
264 * we might be starving btree writes for gc or something.
265 */
266
267 if (!op->replace) {
268 /*
269 * Writethrough write: We can't complete the write until we've
270 * updated the index. But we don't want to delay the write while
271 * we wait for buckets to be freed up, so just invalidate the
272 * rest of the write.
273 */
274 op->bypass = true;
275 return bch_data_invalidate(cl);
276 } else {
277 /*
278 * From a cache miss, we can just insert the keys for the data
279 * we have written or bail out if we didn't do anything.
280 */
281 op->insert_data_done = true;
282 bio_put(bio);
283
284 if (!bch_keylist_empty(&op->insert_keys))
285 continue_at(cl, bch_data_insert_keys, op->wq);
286 else
287 closure_return(cl);
288 }
289}
290
291/**
292 * bch_data_insert - stick some data in the cache
293 *
294 * This is the starting point for any data to end up in a cache device; it could
295 * be from a normal write, or a writeback write, or a write to a flash only
296 * volume - it's also used by the moving garbage collector to compact data in
297 * mostly empty buckets.
298 *
299 * It first writes the data to the cache, creating a list of keys to be inserted
300 * (if the data had to be fragmented there will be multiple keys); after the
301 * data is written it calls bch_journal, and after the keys have been added to
302 * the next journal write they're inserted into the btree.
303 *
304 * It inserts the data in s->cache_bio; bi_sector is used for the key offset,
305 * and op->inode is used for the key inode.
306 *
307 * If s->bypass is true, instead of inserting the data it invalidates the
308 * region of the cache represented by s->cache_bio and op->inode.
309 */
310void bch_data_insert(struct closure *cl)
311{
312 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
313
314 trace_bcache_write(op->bio, op->writeback, op->bypass);
315
316 bch_keylist_init(&op->insert_keys);
317 bio_get(op->bio);
318 bch_data_insert_start(cl);
319}
320
321/* Congested? */
322
323unsigned bch_get_congested(struct cache_set *c)
324{
325 int i;
326 long rand;
327
328 if (!c->congested_read_threshold_us &&
329 !c->congested_write_threshold_us)
330 return 0;
331
332 i = (local_clock_us() - c->congested_last_us) / 1024;
333 if (i < 0)
334 return 0;
335
336 i += atomic_read(&c->congested);
337 if (i >= 0)
338 return 0;
339
340 i += CONGESTED_MAX;
341
342 if (i > 0)
343 i = fract_exp_two(i, 6);
344
345 rand = get_random_int();
346 i -= bitmap_weight(&rand, BITS_PER_LONG);
347
348 return i > 0 ? i : 1;
349}
350
351static void add_sequential(struct task_struct *t)
352{
353 ewma_add(t->sequential_io_avg,
354 t->sequential_io, 8, 0);
355
356 t->sequential_io = 0;
357}
358
359static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
360{
361 return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
362}
363
364static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
365{
366 struct cache_set *c = dc->disk.c;
367 unsigned mode = cache_mode(dc, bio);
368 unsigned sectors, congested = bch_get_congested(c);
369 struct task_struct *task = current;
370 struct io *i;
371
372 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
373 c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
374 (bio->bi_rw & REQ_DISCARD))
375 goto skip;
376
377 if (mode == CACHE_MODE_NONE ||
378 (mode == CACHE_MODE_WRITEAROUND &&
379 (bio->bi_rw & REQ_WRITE)))
380 goto skip;
381
382 if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
383 bio_sectors(bio) & (c->sb.block_size - 1)) {
384 pr_debug("skipping unaligned io");
385 goto skip;
386 }
387
388 if (bypass_torture_test(dc)) {
389 if ((get_random_int() & 3) == 3)
390 goto skip;
391 else
392 goto rescale;
393 }
394
395 if (!congested && !dc->sequential_cutoff)
396 goto rescale;
397
398 if (!congested &&
399 mode == CACHE_MODE_WRITEBACK &&
400 (bio->bi_rw & REQ_WRITE) &&
401 (bio->bi_rw & REQ_SYNC))
402 goto rescale;
403
404 spin_lock(&dc->io_lock);
405
406 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
407 if (i->last == bio->bi_iter.bi_sector &&
408 time_before(jiffies, i->jiffies))
409 goto found;
410
411 i = list_first_entry(&dc->io_lru, struct io, lru);
412
413 add_sequential(task);
414 i->sequential = 0;
415found:
416 if (i->sequential + bio->bi_iter.bi_size > i->sequential)
417 i->sequential += bio->bi_iter.bi_size;
418
419 i->last = bio_end_sector(bio);
420 i->jiffies = jiffies + msecs_to_jiffies(5000);
421 task->sequential_io = i->sequential;
422
423 hlist_del(&i->hash);
424 hlist_add_head(&i->hash, iohash(dc, i->last));
425 list_move_tail(&i->lru, &dc->io_lru);
426
427 spin_unlock(&dc->io_lock);
428
429 sectors = max(task->sequential_io,
430 task->sequential_io_avg) >> 9;
431
432 if (dc->sequential_cutoff &&
433 sectors >= dc->sequential_cutoff >> 9) {
434 trace_bcache_bypass_sequential(bio);
435 goto skip;
436 }
437
438 if (congested && sectors >= congested) {
439 trace_bcache_bypass_congested(bio);
440 goto skip;
441 }
442
443rescale:
444 bch_rescale_priorities(c, bio_sectors(bio));
445 return false;
446skip:
447 bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
448 return true;
449}
450
451/* Cache lookup */
452
453struct search {
454 /* Stack frame for bio_complete */
455 struct closure cl;
456
457 struct bbio bio;
458 struct bio *orig_bio;
459 struct bio *cache_miss;
460 struct bcache_device *d;
461
462 unsigned insert_bio_sectors;
463 unsigned recoverable:1;
464 unsigned write:1;
465 unsigned read_dirty_data:1;
466
467 unsigned long start_time;
468
469 struct btree_op op;
470 struct data_insert_op iop;
471};
472
473static void bch_cache_read_endio(struct bio *bio, int error)
474{
475 struct bbio *b = container_of(bio, struct bbio, bio);
476 struct closure *cl = bio->bi_private;
477 struct search *s = container_of(cl, struct search, cl);
478
479 /*
480 * If the bucket was reused while our bio was in flight, we might have
481 * read the wrong data. Set s->error but not error so it doesn't get
482 * counted against the cache device, but we'll still reread the data
483 * from the backing device.
484 */
485
486 if (error)
487 s->iop.error = error;
488 else if (!KEY_DIRTY(&b->key) &&
489 ptr_stale(s->iop.c, &b->key, 0)) {
490 atomic_long_inc(&s->iop.c->cache_read_races);
491 s->iop.error = -EINTR;
492 }
493
494 bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
495}
496
497/*
498 * Read from a single key, handling the initial cache miss if the key starts in
499 * the middle of the bio
500 */
501static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
502{
503 struct search *s = container_of(op, struct search, op);
504 struct bio *n, *bio = &s->bio.bio;
505 struct bkey *bio_key;
506 unsigned ptr;
507
508 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
509 return MAP_CONTINUE;
510
511 if (KEY_INODE(k) != s->iop.inode ||
512 KEY_START(k) > bio->bi_iter.bi_sector) {
513 unsigned bio_sectors = bio_sectors(bio);
514 unsigned sectors = KEY_INODE(k) == s->iop.inode
515 ? min_t(uint64_t, INT_MAX,
516 KEY_START(k) - bio->bi_iter.bi_sector)
517 : INT_MAX;
518
519 int ret = s->d->cache_miss(b, s, bio, sectors);
520 if (ret != MAP_CONTINUE)
521 return ret;
522
523 /* if this was a complete miss we shouldn't get here */
524 BUG_ON(bio_sectors <= sectors);
525 }
526
527 if (!KEY_SIZE(k))
528 return MAP_CONTINUE;
529
530 /* XXX: figure out best pointer - for multiple cache devices */
531 ptr = 0;
532
533 PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
534
535 if (KEY_DIRTY(k))
536 s->read_dirty_data = true;
537
538 n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
539 KEY_OFFSET(k) - bio->bi_iter.bi_sector),
540 GFP_NOIO, s->d->bio_split);
541
542 bio_key = &container_of(n, struct bbio, bio)->key;
543 bch_bkey_copy_single_ptr(bio_key, k, ptr);
544
545 bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
546 bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
547
548 n->bi_end_io = bch_cache_read_endio;
549 n->bi_private = &s->cl;
550
551 /*
552 * The bucket we're reading from might be reused while our bio
553 * is in flight, and we could then end up reading the wrong
554 * data.
555 *
556 * We guard against this by checking (in cache_read_endio()) if
557 * the pointer is stale again; if so, we treat it as an error
558 * and reread from the backing device (but we don't pass that
559 * error up anywhere).
560 */
561
562 __bch_submit_bbio(n, b->c);
563 return n == bio ? MAP_DONE : MAP_CONTINUE;
564}
565
566static void cache_lookup(struct closure *cl)
567{
568 struct search *s = container_of(cl, struct search, iop.cl);
569 struct bio *bio = &s->bio.bio;
570 int ret;
571
572 bch_btree_op_init(&s->op, -1);
573
574 ret = bch_btree_map_keys(&s->op, s->iop.c,
575 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
576 cache_lookup_fn, MAP_END_KEY);
577 if (ret == -EAGAIN)
578 continue_at(cl, cache_lookup, bcache_wq);
579
580 closure_return(cl);
581}
582
583/* Common code for the make_request functions */
584
585static void request_endio(struct bio *bio, int error)
586{
587 struct closure *cl = bio->bi_private;
588
589 if (error) {
590 struct search *s = container_of(cl, struct search, cl);
591 s->iop.error = error;
592 /* Only cache read errors are recoverable */
593 s->recoverable = false;
594 }
595
596 bio_put(bio);
597 closure_put(cl);
598}
599
600static void bio_complete(struct search *s)
601{
602 if (s->orig_bio) {
603 int cpu, rw = bio_data_dir(s->orig_bio);
604 unsigned long duration = jiffies - s->start_time;
605
606 cpu = part_stat_lock();
607 part_round_stats(cpu, &s->d->disk->part0);
608 part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
609 part_stat_unlock();
610
611 trace_bcache_request_end(s->d, s->orig_bio);
612 bio_endio(s->orig_bio, s->iop.error);
613 s->orig_bio = NULL;
614 }
615}
616
617static void do_bio_hook(struct search *s, struct bio *orig_bio)
618{
619 struct bio *bio = &s->bio.bio;
620
621 bio_init(bio);
622 __bio_clone_fast(bio, orig_bio);
623 bio->bi_end_io = request_endio;
624 bio->bi_private = &s->cl;
625
626 atomic_set(&bio->bi_cnt, 3);
627}
628
629static void search_free(struct closure *cl)
630{
631 struct search *s = container_of(cl, struct search, cl);
632 bio_complete(s);
633
634 if (s->iop.bio)
635 bio_put(s->iop.bio);
636
637 closure_debug_destroy(cl);
638 mempool_free(s, s->d->c->search);
639}
640
641static inline struct search *search_alloc(struct bio *bio,
642 struct bcache_device *d)
643{
644 struct search *s;
645
646 s = mempool_alloc(d->c->search, GFP_NOIO);
647
648 closure_init(&s->cl, NULL);
649 do_bio_hook(s, bio);
650
651 s->orig_bio = bio;
652 s->cache_miss = NULL;
653 s->d = d;
654 s->recoverable = 1;
655 s->write = (bio->bi_rw & REQ_WRITE) != 0;
656 s->read_dirty_data = 0;
657 s->start_time = jiffies;
658
659 s->iop.c = d->c;
660 s->iop.bio = NULL;
661 s->iop.inode = d->id;
662 s->iop.write_point = hash_long((unsigned long) current, 16);
663 s->iop.write_prio = 0;
664 s->iop.error = 0;
665 s->iop.flags = 0;
666 s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
667 s->iop.wq = bcache_wq;
668
669 return s;
670}
671
672/* Cached devices */
673
674static void cached_dev_bio_complete(struct closure *cl)
675{
676 struct search *s = container_of(cl, struct search, cl);
677 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
678
679 search_free(cl);
680 cached_dev_put(dc);
681}
682
683/* Process reads */
684
static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio) {
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, s->iop.bio, i)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}

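/*
 * A read from the cache device failed. If the search is still recoverable,
 * clear the error and resubmit the original request to the backing device;
 * either way, finish up via cached_dev_cache_miss_done().
 */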
static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.error = 0;
		do_bio_hook(s, s->orig_bio);

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl, s->d);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

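/*
 * Runs when the cache lookup closure completes, possibly from the endio path
 * (hence "_bh" and the NULL workqueue): update hit/miss accounting, then
 * either retry on error, punt to cached_dev_read_done() on bcache_wq when
 * there is data to copy/verify/insert, or complete the request immediately.
 */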
static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_miss, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

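/*
 * Called for each hole in the cache found during a read. Unless the read is
 * bypassing the cache, reserve the range with a check key (replace_key),
 * optionally extend it with readahead, and read the data into a bounce bio
 * (cache_bio); cached_dev_read_done() later copies it back to the original
 * bio and inserts it into the cache. If the bounce bio can't be allocated,
 * the miss is just submitted straight to the backing device.
 */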
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
	cache_bio->bi_bdev = miss->bi_bdev;
	cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;

	cache_bio->bi_end_io = request_endio;
	cache_bio->bi_private = &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss = miss;
	s->iop.bio = cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io = request_endio;
	miss->bi_private = &s->cl;
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

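/*
 * Writes take one of three paths: bypass (write only to the backing device,
 * invalidating any cached copy), writeback (write only to the cache and mark
 * it dirty), or the default writethrough/writearound behaviour where the data
 * goes to the backing device and is handed to bch_data_insert() for the
 * cache.
 */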
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl, s->d);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw = WRITE_FLUSH;
			flush->bi_bdev = bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

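/*
 * Requests with no data (empty flushes) don't need the cache lookup/insert
 * machinery: journal a flush if requested and pass the bio through to the
 * backing device.
 */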
static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

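/*
 * make_request entry point for a bcache device with a backing device:
 * account the request, remap it to the backing device (offsetting past the
 * bcache superblock), and dispatch to the read, write or nodata paths. If the
 * cached_dev reference can't be taken (the device is going away), the request
 * is simply passed through to the backing device.
 */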
static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev = dc->bdev;
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn = cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss = cached_dev_cache_miss;
	dc->disk.ioctl = cached_dev_ioctl;
}

/* Flash backed devices */

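/*
 * Flash-only volumes have no backing device, so a "cache miss" is simply a
 * hole: zero-fill the unbacked part of the bio and keep walking the btree.
 */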
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

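/*
 * make_request entry point for flash-only volumes: reads go through the btree
 * lookup, while writes (and discards) are inserted directly as writeback data
 * since the cache is the only copy.
 */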
static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					     &KEY(d->id, bio->bi_iter.bi_sector, 0),
					     &KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
		s->iop.writeback = true;
		s->iop.bio = bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn = flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss = flash_dev_cache_miss;
	d->ioctl = flash_dev_ioctl;
}

void bch_request_exit(void)
{
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}