bcache low-level IO code (drivers/md/bcache/io.c, kernel v6.8)
// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/* Bios with headers */

void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	mempool_free(b, &c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

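	/*
	 * The bio and its bio_vec array live inside the bbio allocation:
	 * bi_inline_vecs is the flexible array at the end of struct bio,
	 * sized here for one metadata bucket, so no separate bvec
	 * allocation is needed.
	 */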
	bio_init(bio, NULL, bio->bi_inline_vecs,
		 meta_bucket_pages(&c->cache->sb), 0);

	return bio;
}

void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

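	/*
	 * The bkey stashed in the bbio header says where this IO goes:
	 * the offset of pointer 0 is the starting sector on the cache
	 * device.
	 */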
	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio_set_dev(bio, c->cache->bdev);

	b->submit_time_us = local_clock_us();
	closure_bio_submit(c, bio, bio->bi_private);
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned int ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}

/* IO errors */
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{
	unsigned int errors;

	WARN_ONCE(!dc, "NULL pointer of struct cached_dev");

	/*
	 * Read-ahead requests on a degrading and recovering md raid
	 * (e.g. raid6) device might be failed immediately by the md
	 * raid code, which is not a real hardware media failure. So
	 * we shouldn't count failed REQ_RAHEAD bios in dc->io_errors.
	 */
	if (bio->bi_opf & REQ_RAHEAD) {
		pr_warn_ratelimited("%pg: Read-ahead I/O failed on backing device, ignore\n",
				    dc->bdev);
		return;
	}

	errors = atomic_add_return(1, &dc->io_errors);
	if (errors < dc->error_limit)
		pr_err("%pg: IO error on backing device, unrecoverable\n",
			dc->bdev);
	else
		bch_cached_dev_error(dc);
}

void bch_count_io_errors(struct cache *ca,
			 blk_status_t error,
			 int is_read,
			 const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */
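	/*
	 * That is: one rescale by 127/128 happens per error_decay
	 * ("refresh") IOs, and (127/128)^n reaches 1/2 at n ~= 88,
	 * so an error's weight halves every ~88 * error_decay IOs.
	 */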

	if (ca->set->error_decay) {
		unsigned int count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned int errors;
			unsigned int old = count;
			unsigned int new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

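				/*
				 * We won the race to knock error_decay off
				 * io_count, so apply one 127/128 rescale to
				 * io_errors; the cmpxchg loop retries if
				 * another CPU updates it concurrently.
				 */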
				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
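		/*
		 * Errors are counted in fixed point, with IO_ERROR_SHIFT
		 * bits of fraction, so the 127/128 decay above has
		 * sub-integer resolution; shift the fraction back out
		 * before comparing against error_limit.
		 */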
		unsigned int errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%pg: IO error on %s%s\n",
			       ca->bdev, m,
			       is_read ? ", recovering." : ".");
		else
			bch_cache_set_error(ca->set,
					    "%pg: too many IO errors %s\n",
					    ca->bdev, m);
	}
}

void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      blk_status_t error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = c->cache;
	int is_read = (bio_data_dir(bio) == READ ? 1 : 0);

	unsigned int threshold = op_is_write(bio_op(bio))
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned int t = local_clock_us();
		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

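		/*
		 * c->congested is a signed leaky counter: an IO that blows
		 * past the latency threshold drives it negative by roughly
		 * its latency in milliseconds (floored at -CONGESTED_MAX),
		 * and every on-time IO ticks it back toward zero.
		 */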
		if (us > (int) threshold) {
			int ms = us / 1024;

			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, is_read, m);
}

void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    blk_status_t error, const char *m)
{
	struct closure *cl = bio->bi_private;

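	/*
	 * Account latency/errors for this IO, then drop the bio and
	 * release the closure reference held across the submission.
	 */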
	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}
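
The error-decay scheme in bch_count_io_errors() is self-contained enough to model outside the kernel. Below is a minimal user-space sketch of the same pattern, under these assumptions: C11 atomics stand in for the kernel's atomic_t/atomic_cmpxchg, and the names decay_on_io, ERROR_DECAY, io_count and io_errors are hypothetical stand-ins, not part of bcache.

/* Standalone sketch of bcache's error-decay pattern.
 * Build with e.g.: cc -std=c11 -o decay decay.c */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define ERROR_DECAY 64	/* rescale errors once per 64 IOs */

static _Atomic unsigned int io_count;	/* stand-in for ca->io_count */
static _Atomic unsigned int io_errors;	/* stand-in for ca->io_errors */

/* Called once per IO: subtract the decay period from the IO count
 * with compare-exchange; each successful subtraction earns one
 * errors *= 127/128 rescale, the same shape as bch_count_io_errors(). */
static void decay_on_io(void)
{
	unsigned int count = atomic_fetch_add(&io_count, 1) + 1;

	while (count > ERROR_DECAY) {
		unsigned int new = count - ERROR_DECAY;

		if (!atomic_compare_exchange_weak(&io_count, &count, new))
			continue;	/* lost the race; count was reloaded */

		count = new;

		unsigned int errors = atomic_load(&io_errors);
		unsigned int rescaled;
		do {
			rescaled = ((uint64_t)errors * 127) / 128;
		} while (!atomic_compare_exchange_weak(&io_errors,
						       &errors, rescaled));
	}
}

int main(void)
{
	atomic_store(&io_errors, 1024);

	/* ~88 rescales make one halflife: expect roughly half of 1024 */
	for (int i = 0; i < 88 * ERROR_DECAY; i++)
		decay_on_io();

	printf("errors after one halflife: %u\n",
	       atomic_load(&io_errors));
	return 0;
}

One design note: a weak compare-exchange may fail spuriously, which only costs a retry here; the kernel's atomic_cmpxchg loops above are the same shape, with the reload of the expected value done by hand.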