// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/* Bios with headers */

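/*
 * A bbio (struct bbio, defined in bcache.h) bundles a bio with the bkey it
 * targets and a submit timestamp; that is what the container_of()/b->key/
 * b->submit_time_us usage below relies on. These helpers allocate and free
 * bbios from the cache set's bio_meta mempool, with enough inline bvecs for
 * a full metadata bucket.
 */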
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	mempool_free(b, &c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio, NULL, bio->bi_inline_vecs,
		 meta_bucket_pages(&c->cache->sb), 0);

	return bio;
}

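/*
 * bch_submit_bbio() copies a single pointer from @k into the bbio's key;
 * __bch_submit_bbio() then aims the bio at that pointer's offset on the
 * cache device, records the submit time for latency accounting, and
 * submits it under the closure stored in bi_private.
 */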
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
	bio_set_dev(bio, c->cache->bdev);

	b->submit_time_us = local_clock_us();
	closure_bio_submit(c, bio, bio->bi_private);
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned int ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}

/* IO errors */
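/*
 * Error accounting for a cached_dev's backing device: every failure other
 * than a read-ahead failure (which is only logged) increments
 * dc->io_errors, and once dc->error_limit is reached the backing device is
 * retired via bch_cached_dev_error().
 */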
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{
	unsigned int errors;

	WARN_ONCE(!dc, "NULL pointer of struct cached_dev");

	/*
	 * Read-ahead requests on a degrading and recovering md raid
	 * (e.g. raid6) device might be failed immediately by the md
	 * raid code, which is not a real hardware media failure. So
	 * we shouldn't count failed REQ_RAHEAD bios toward dc->io_errors.
	 */
	if (bio->bi_opf & REQ_RAHEAD) {
		pr_warn_ratelimited("%pg: Read-ahead I/O failed on backing device, ignore\n",
				    dc->bdev);
		return;
	}

	errors = atomic_add_return(1, &dc->io_errors);
	if (errors < dc->error_limit)
		pr_err("%pg: IO error on backing device, unrecoverable\n",
			dc->bdev);
	else
		bch_cached_dev_error(dc);
}

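/*
 * Cache device error accounting with exponential decay: every
 * ca->set->error_decay I/Os the accumulated error count is rescaled by
 * 127/128, so it halves after roughly 88 such periods, since
 * ln(1/2) / ln(127/128) ~= 88.4. Errors are added with a fixed-point
 * shift of IO_ERROR_SHIFT so the decay retains fractional precision, and
 * the shifted-down value is compared against error_limit.
 */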
void bch_count_io_errors(struct cache *ca,
			 blk_status_t error,
			 int is_read,
			 const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */

	if (ca->set->error_decay) {
		unsigned int count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned int errors;
			unsigned int old = count;
			unsigned int new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
		unsigned int errors = atomic_add_return(1 << IO_ERROR_SHIFT,
							&ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%pg: IO error on %s%s\n",
			       ca->bdev, m,
			       is_read ? ", recovering." : ".");
		else
			bch_cache_set_error(ca->set,
					    "%pg: too many IO errors %s\n",
					    ca->bdev, m);
	}
}

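/*
 * Latency accounting for bbios: if a request takes longer than the
 * configured read/write congestion threshold, c->congested is pushed
 * negative by roughly the number of milliseconds of latency (us / 1024),
 * clamped so it never falls below -CONGESTED_MAX; requests that complete
 * under the threshold increment it back toward zero one at a time.
 */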
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      blk_status_t error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = c->cache;
	int is_read = (bio_data_dir(bio) == READ ? 1 : 0);

	unsigned int threshold = op_is_write(bio_op(bio))
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned int t = local_clock_us();
		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;

			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, is_read, m);
}

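/*
 * Common completion path for bbios: account latency and errors via
 * bch_bbio_count_io_errors(), release the bio, and put the parent closure
 * that was stored in bi_private.
 */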
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    blk_status_t error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}

/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

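/*
 * Work out how many sectors of this bio the underlying queue will take in
 * one request: discards are bounded by max_discard_sectors, everything else
 * by the queue's segment limit, an optional merge_bvec_fn, and max_sectors.
 * The result is floored at the size of the first bvec so forward progress
 * is always possible.
 */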
static unsigned bch_bio_max_sectors(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned ret = 0, seg = 0;

	if (bio->bi_rw & REQ_DISCARD)
		return min(bio_sectors(bio), q->limits.max_discard_sectors);

	bio_for_each_segment(bv, bio, iter) {
		struct bvec_merge_data bvm = {
			.bi_bdev	= bio->bi_bdev,
			.bi_sector	= bio->bi_iter.bi_sector,
			.bi_size	= ret << 9,
			.bi_rw		= bio->bi_rw,
		};

		if (seg == min_t(unsigned, BIO_MAX_PAGES,
				 queue_max_segments(q)))
			break;

		if (q->merge_bvec_fn &&
		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
			break;

		seg++;
		ret += bv.bv_len >> 9;
	}

	ret = min(ret, queue_max_sectors(q));

	WARN_ON(!ret);
	ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);

	return ret;
}

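/*
 * Splitting machinery: a bio_split_hook saves the original bio's completion
 * callback and private data while the bio is carved into device-sized
 * pieces. Each piece holds a reference on the hook's closure; when the last
 * one completes, bch_bio_submit_split_done() restores the saved fields and
 * ends the original bio, with any failure reported through the
 * BIO_UPTODATE flag cleared in the endio below.
 */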
static void bch_bio_submit_split_done(struct closure *cl)
{
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	s->bio->bi_end_io = s->bi_end_io;
	s->bio->bi_private = s->bi_private;
	bio_endio_nodec(s->bio, 0);

	closure_debug_destroy(&s->cl);
	mempool_free(s, s->p->bio_split_hook);
}

static void bch_bio_submit_split_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	if (error)
		clear_bit(BIO_UPTODATE, &s->bio->bi_flags);

	bio_put(bio);
	closure_put(cl);
}

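/*
 * Front end for generic_make_request(): bios that carry no data and are not
 * discards, or that already fit within bch_bio_max_sectors(), are submitted
 * directly. Anything larger is chopped up with bio_next_split() and each
 * piece is tied to the split hook's closure, so the original bio only
 * completes once every piece has finished.
 */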
void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
	struct bio_split_hook *s;
	struct bio *n;

	if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
		goto submit;

	if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
		goto submit;

	s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
	closure_init(&s->cl, NULL);

	s->bio		= bio;
	s->p		= p;
	s->bi_end_io	= bio->bi_end_io;
	s->bi_private	= bio->bi_private;
	bio_get(bio);

	do {
		n = bio_next_split(bio, bch_bio_max_sectors(bio),
				   GFP_NOIO, s->p->bio_split);

		n->bi_end_io	= bch_bio_submit_split_endio;
		n->bi_private	= &s->cl;

		closure_get(&s->cl);
		generic_make_request(n);
	} while (n != bio);

	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
submit:
	generic_make_request(bio);
}

/* Bios with headers */

void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	mempool_free(b, c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio);
	bio->bi_flags		|= BIO_POOL_NONE << BIO_POOL_OFFSET;
	bio->bi_max_vecs	 = bucket_pages(c);
	bio->bi_io_vec		 = bio->bi_inline_vecs;

	return bio;
}

void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_bdev		= PTR_CACHE(c, &b->key, 0)->bdev;

	b->submit_time_us = local_clock_us();
	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}

/* IO errors */

void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */

	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s, recovering",
			       bdevname(ca->bdev, buf), m);
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}

void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      int error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);

	unsigned threshold = bio->bi_rw & REQ_WRITE
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;
			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, m);
}

void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    int error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}