// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/* Bios with headers */

void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	mempool_free(b, &c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio, NULL, bio->bi_inline_vecs,
		 meta_bucket_pages(&c->cache->sb), 0);

	return bio;
}

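/*
 * Submit a bbio whose key has already been filled in: the first pointer
 * in b->key supplies the starting sector, and the bio is pointed at the
 * cache device before being passed down with the caller's closure.
 */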
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
	bio_set_dev(bio, c->cache->bdev);

	b->submit_time_us = local_clock_us();
	closure_bio_submit(c, bio, bio->bi_private);
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned int ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}

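/*
 * Sketch of the typical bbio lifecycle; the endio callback name, @buf and
 * the closure @cl are illustrative, not part of this file:
 *
 *	bio = bch_bbio_alloc(c);
 *	bio->bi_iter.bi_size	= KEY_SIZE(k) << 9;
 *	bio->bi_end_io		= my_endio;
 *	bio->bi_private		= cl;
 *	bio->bi_opf		= REQ_OP_READ | REQ_META;
 *	bch_bio_map(bio, buf);
 *	bch_submit_bbio(bio, c, k, 0);
 *
 * my_endio() is then expected to call bch_bbio_endio(), which counts
 * errors, drops the bio reference and puts @cl.
 */
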
/* IO errors */

void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{
	unsigned int errors;

	WARN_ONCE(!dc, "NULL pointer of struct cached_dev");

	/*
	 * Read-ahead requests on a degrading and recovering md raid
	 * (e.g. raid6) device might be failed immediately by the md
	 * raid code, which is not a real hardware media failure. So
	 * we shouldn't count failed REQ_RAHEAD bios in dc->io_errors.
	 */
	if (bio->bi_opf & REQ_RAHEAD) {
		pr_warn_ratelimited("%pg: Read-ahead I/O failed on backing device, ignore\n",
				    dc->bdev);
		return;
	}

	errors = atomic_add_return(1, &dc->io_errors);
	if (errors < dc->error_limit)
		pr_err("%pg: IO error on backing device, unrecoverable\n",
			dc->bdev);
	else
		bch_cached_dev_error(dc);
}

void bch_count_io_errors(struct cache *ca,
			 blk_status_t error,
			 int is_read,
			 const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */
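	/*
	 * Worked out: each decay step below scales the error count by
	 * 127/128, so after n steps it has been scaled by (127/128)^n.
	 * It halves when (127/128)^n = 1/2, i.e. when
	 * n = log(2) / log(128/127) ~= 88.4.
	 */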

	if (ca->set->error_decay) {
		unsigned int count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned int errors;
			unsigned int old = count;
			unsigned int new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

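			/*
			 * atomic_cmpxchg() returns the value it found: if it
			 * still equals @old, the swap happened and this CPU
			 * owns the decay step; otherwise another CPU raced us
			 * and we retry with the value it left behind.
			 */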
			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

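	/*
	 * Errors are accumulated in fixed point, shifted left by
	 * IO_ERROR_SHIFT, so the multiplicative decay above still makes
	 * progress when the whole-number error count is small.
	 */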
	if (error) {
		unsigned int errors = atomic_add_return(1 << IO_ERROR_SHIFT,
							&ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%pg: IO error on %s%s\n",
			       ca->bdev, m,
			       is_read ? ", recovering." : ".");
		else
			bch_cache_set_error(ca->set,
					    "%pg: too many IO errors %s\n",
					    ca->bdev, m);
	}
}

void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      blk_status_t error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = c->cache;
	int is_read = (bio_data_dir(bio) == READ ? 1 : 0);

	unsigned int threshold = op_is_write(bio_op(bio))
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

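	/*
	 * Congestion accounting: each request slower than the threshold
	 * pushes c->congested further negative by roughly its latency in
	 * ms (us / 1024 is a cheap approximation of us / 1000), bounded
	 * by CONGESTED_MAX; each fast request nudges it back toward zero.
	 */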
	if (threshold) {
		unsigned int t = local_clock_us();
		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;

			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, is_read, m);
}

void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    blk_status_t error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}