v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * Assorted bcache debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

struct dentry *bcache_debug;

#ifdef CONFIG_BCACHE_DEBUG

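/*
 * Walk each bset in a node's on-disk data: start at @start and advance one
 * bset at a time, stopping at the end of the node or at the first bset whose
 * sequence number no longer matches @start (i.e. was never written).
 */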
#define for_each_written_bset(b, start, i)				\
	for (i = (start);						\
	     (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
	     i->seq == (start)->seq;					\
	     i = (void *) i + set_blocks(i, block_bytes(b->c->cache)) *	\
		 block_bytes(b->c->cache))

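/*
 * Verify a btree node against what is actually on disk: re-read the node's
 * extent from the cache device, run it through bch_btree_node_read_done() on
 * the cache set's verify node, and compare the sorted keys with the
 * in-memory bset.  On a mismatch, dump the in-memory copy, the re-read copy
 * and every written on-disk bset to the console, then panic.
 */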
void bch_btree_verify(struct btree *b)
{
	struct btree *v = b->c->verify_data;
	struct bset *ondisk, *sorted, *inmemory;
	struct bio *bio;

	if (!b->c->verify || !b->c->verify_ondisk)
		return;

	down(&b->io_mutex);
	mutex_lock(&b->c->verify_lock);

	ondisk = b->c->verify_ondisk;
	sorted = b->c->verify_data->keys.set->data;
	inmemory = b->keys.set->data;

	bkey_copy(&v->key, &b->key);
	v->written = 0;
	v->level = b->level;
	v->keys.ops = b->keys.ops;

	bio = bch_bbio_alloc(b->c);
	bio_set_dev(bio, b->c->cache->bdev);
	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_iter.bi_size	= KEY_SIZE(&v->key) << 9;
	bio->bi_opf		= REQ_OP_READ | REQ_META;
	bch_bio_map(bio, sorted);

	submit_bio_wait(bio);
	bch_bbio_free(bio, b->c);

	memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);

	bch_btree_node_read_done(v);
	sorted = v->keys.set->data;

	if (inmemory->keys != sorted->keys ||
	    memcmp(inmemory->start,
		   sorted->start,
		   (void *) bset_bkey_last(inmemory) -
		   (void *) inmemory->start)) {
		struct bset *i;
		unsigned int j;

		console_lock();

		pr_err("*** in memory:\n");
		bch_dump_bset(&b->keys, inmemory, 0);

		pr_err("*** read back in:\n");
		bch_dump_bset(&v->keys, sorted, 0);

		for_each_written_bset(b, ondisk, i) {
			unsigned int block = ((void *) i - (void *) ondisk) /
				block_bytes(b->c->cache);

			pr_err("*** on disk block %u:\n", block);
			bch_dump_bset(&b->keys, i, block);
		}

		pr_err("*** block %zu not written\n",
		       ((void *) i - (void *) ondisk) / block_bytes(b->c->cache));

		for (j = 0; j < inmemory->keys; j++)
			if (inmemory->d[j] != sorted->d[j])
				break;

		pr_err("b->written %u\n", b->written);

		console_unlock();
		panic("verify failed at %u\n", j);
	}

	mutex_unlock(&b->c->verify_lock);
	up(&b->io_mutex);
}

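/*
 * Verify data for a cached device: allocate a throwaway bio covering the
 * same device and sectors as @bio, read it back, and memcmp the result
 * segment by segment against @bio's pages, flagging a cache set error on
 * any mismatch.
 */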
void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
	unsigned int nr_segs = bio_segments(bio);
	struct bio *check;
	struct bio_vec bv, cbv;
	struct bvec_iter iter, citer = { 0 };

	check = bio_kmalloc(nr_segs, GFP_NOIO);
	if (!check)
		return;
	bio_init(check, bio->bi_bdev, check->bi_inline_vecs, nr_segs,
		 REQ_OP_READ);
	check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
	check->bi_iter.bi_size = bio->bi_iter.bi_size;

	bch_bio_map(check, NULL);
	if (bch_bio_alloc_pages(check, GFP_NOIO))
		goto out_put;

	submit_bio_wait(check);

	citer.bi_size = UINT_MAX;
	bio_for_each_segment(bv, bio, iter) {
		void *p1 = bvec_kmap_local(&bv);
		void *p2;

		cbv = bio_iter_iovec(check, citer);
		p2 = bvec_kmap_local(&cbv);

		cache_set_err_on(memcmp(p1, p2, bv.bv_len),
				 dc->disk.c,
				 "verify failed at dev %pg sector %llu",
				 dc->bdev,
				 (uint64_t) bio->bi_iter.bi_sector);

		kunmap_local(p2);
		kunmap_local(p1);
		bio_advance_iter(check, &citer, bv.bv_len);
	}

	bio_free_pages(check);
out_put:
	bio_uninit(check);
	kfree(check);
}

#endif

#ifdef CONFIG_DEBUG_FS

/* XXX: cache set refcounting */

struct dump_iterator {
	char			buf[PAGE_SIZE];
	size_t			bytes;
	struct cache_set	*c;
	struct keybuf		keys;
};

static bool dump_pred(struct keybuf *buf, struct bkey *k)
{
	return true;
}

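/*
 * read() for the per-cache-set debugfs file: walk the btree with a keybuf,
 * format one extent per line into the iterator's buffer, and copy out as
 * much as fits in the caller's buffer.
 */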
static ssize_t bch_dump_read(struct file *file, char __user *buf,
			     size_t size, loff_t *ppos)
{
	struct dump_iterator *i = file->private_data;
	ssize_t ret = 0;
	char kbuf[80];

	while (size) {
		struct keybuf_key *w;
		unsigned int bytes = min(i->bytes, size);

		if (copy_to_user(buf, i->buf, bytes))
			return -EFAULT;

		ret	 += bytes;
		buf	 += bytes;
		size	 -= bytes;
		i->bytes -= bytes;
		memmove(i->buf, i->buf + bytes, i->bytes);

		if (i->bytes)
			break;

		w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred);
		if (!w)
			break;

		bch_extent_to_text(kbuf, sizeof(kbuf), &w->key);
		i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
		bch_keybuf_del(&i->keys, w);
	}

	return ret;
}

static int bch_dump_open(struct inode *inode, struct file *file)
{
	struct cache_set *c = inode->i_private;
	struct dump_iterator *i;

	i = kzalloc(sizeof(struct dump_iterator), GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	file->private_data = i;
	i->c = c;
	bch_keybuf_init(&i->keys);
	i->keys.last_scanned = KEY(0, 0, 0);

	return 0;
}

static int bch_dump_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations cache_set_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch_dump_open,
	.read		= bch_dump_read,
	.release	= bch_dump_release
};

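/*
 * Create the per-cache-set debugfs file, named "bcache-<set uuid>", under
 * the top-level bcache debugfs directory.
 */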
void bch_debug_init_cache_set(struct cache_set *c)
{
	if (!IS_ERR_OR_NULL(bcache_debug)) {
		char name[50];

		snprintf(name, 50, "bcache-%pU", c->set_uuid);
		c->debug = debugfs_create_file(name, 0400, bcache_debug, c,
					       &cache_set_debug_ops);
	}
}

#endif

void bch_debug_exit(void)
{
	debugfs_remove_recursive(bcache_debug);
}

void __init bch_debug_init(void)
{
	/*
	 * It is unnecessary to check the return value of
	 * debugfs_create_dir(); we do not care whether the
	 * directory was actually created.
	 */
	bcache_debug = debugfs_create_dir("bcache", NULL);
}
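For reference, a minimal userspace sketch of consuming the dump file that bch_dump_open()/bch_dump_read() back. The debugfs mount point and the cache set UUID in the path are assumptions for illustration only; they are not taken from the source above.

/* Hypothetical example, not part of the kernel source. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/*
	 * Path assumes debugfs is mounted at /sys/kernel/debug and uses a
	 * made-up cache set UUID; substitute the real one.
	 */
	const char *path =
		"/sys/kernel/debug/bcache/bcache-00000000-0000-0000-0000-000000000000";
	char buf[4096];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* one extent per line */
	close(fd);
	return 0;
}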
v4.10.11
 
/*
 * Assorted bcache debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

static struct dentry *debug;

#ifdef CONFIG_BCACHE_DEBUG

#define for_each_written_bset(b, start, i)				\
	for (i = (start);						\
	     (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
	     i->seq == (start)->seq;					\
	     i = (void *) i + set_blocks(i, block_bytes(b->c)) *	\
		 block_bytes(b->c))

void bch_btree_verify(struct btree *b)
{
	struct btree *v = b->c->verify_data;
	struct bset *ondisk, *sorted, *inmemory;
	struct bio *bio;

	if (!b->c->verify || !b->c->verify_ondisk)
		return;

	down(&b->io_mutex);
	mutex_lock(&b->c->verify_lock);

	ondisk = b->c->verify_ondisk;
	sorted = b->c->verify_data->keys.set->data;
	inmemory = b->keys.set->data;

	bkey_copy(&v->key, &b->key);
	v->written = 0;
	v->level = b->level;
	v->keys.ops = b->keys.ops;

	bio = bch_bbio_alloc(b->c);
	bio->bi_bdev		= PTR_CACHE(b->c, &b->key, 0)->bdev;
	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_iter.bi_size	= KEY_SIZE(&v->key) << 9;
	bio->bi_opf		= REQ_OP_READ | REQ_META;
	bch_bio_map(bio, sorted);

	submit_bio_wait(bio);
	bch_bbio_free(bio, b->c);

	memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);

	bch_btree_node_read_done(v);
	sorted = v->keys.set->data;

	if (inmemory->keys != sorted->keys ||
	    memcmp(inmemory->start,
		   sorted->start,
		   (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
		struct bset *i;
		unsigned j;

		console_lock();

		printk(KERN_ERR "*** in memory:\n");
		bch_dump_bset(&b->keys, inmemory, 0);

		printk(KERN_ERR "*** read back in:\n");
		bch_dump_bset(&v->keys, sorted, 0);

		for_each_written_bset(b, ondisk, i) {
			unsigned block = ((void *) i - (void *) ondisk) /
				block_bytes(b->c);

			printk(KERN_ERR "*** on disk block %u:\n", block);
			bch_dump_bset(&b->keys, i, block);
		}

		printk(KERN_ERR "*** block %zu not written\n",
		       ((void *) i - (void *) ondisk) / block_bytes(b->c));

		for (j = 0; j < inmemory->keys; j++)
			if (inmemory->d[j] != sorted->d[j])
				break;

		printk(KERN_ERR "b->written %u\n", b->written);

		console_unlock();
		panic("verify failed at %u\n", j);
	}

	mutex_unlock(&b->c->verify_lock);
	up(&b->io_mutex);
}

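/*
 * Older variant of bch_data_verify(): clone the original bio, read the same
 * sectors back, and compare page contents against the original using
 * kmap_atomic()/page_address() mappings.
 */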
void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
	char name[BDEVNAME_SIZE];
	struct bio *check;
	struct bio_vec bv, cbv;
	struct bvec_iter iter, citer = { 0 };

	check = bio_clone(bio, GFP_NOIO);
	if (!check)
		return;
	check->bi_opf = REQ_OP_READ;

	if (bio_alloc_pages(check, GFP_NOIO))
		goto out_put;

	submit_bio_wait(check);

	citer.bi_size = UINT_MAX;
	bio_for_each_segment(bv, bio, iter) {
		void *p1 = kmap_atomic(bv.bv_page);
		void *p2;

		cbv = bio_iter_iovec(check, citer);
		p2 = page_address(cbv.bv_page);

		cache_set_err_on(memcmp(p1 + bv.bv_offset,
					p2 + bv.bv_offset,
					bv.bv_len),
				 dc->disk.c,
				 "verify failed at dev %s sector %llu",
				 bdevname(dc->bdev, name),
				 (uint64_t) bio->bi_iter.bi_sector);

		kunmap_atomic(p1);
		bio_advance_iter(check, &citer, bv.bv_len);
	}

	bio_free_pages(check);
out_put:
	bio_put(check);
}

#endif

#ifdef CONFIG_DEBUG_FS

/* XXX: cache set refcounting */

struct dump_iterator {
	char			buf[PAGE_SIZE];
	size_t			bytes;
	struct cache_set	*c;
	struct keybuf		keys;
};

static bool dump_pred(struct keybuf *buf, struct bkey *k)
{
	return true;
}

static ssize_t bch_dump_read(struct file *file, char __user *buf,
			     size_t size, loff_t *ppos)
{
	struct dump_iterator *i = file->private_data;
	ssize_t ret = 0;
	char kbuf[80];

	while (size) {
		struct keybuf_key *w;
		unsigned bytes = min(i->bytes, size);

		if (copy_to_user(buf, i->buf, bytes))
			return -EFAULT;

		ret	 += bytes;
		buf	 += bytes;
		size	 -= bytes;
		i->bytes -= bytes;
		memmove(i->buf, i->buf + bytes, i->bytes);

		if (i->bytes)
			break;

		w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred);
		if (!w)
			break;

		bch_extent_to_text(kbuf, sizeof(kbuf), &w->key);
		i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
		bch_keybuf_del(&i->keys, w);
	}

	return ret;
}

static int bch_dump_open(struct inode *inode, struct file *file)
{
	struct cache_set *c = inode->i_private;
	struct dump_iterator *i;

	i = kzalloc(sizeof(struct dump_iterator), GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	file->private_data = i;
	i->c = c;
	bch_keybuf_init(&i->keys);
	i->keys.last_scanned = KEY(0, 0, 0);

	return 0;
}

static int bch_dump_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations cache_set_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch_dump_open,
	.read		= bch_dump_read,
	.release	= bch_dump_release
};

void bch_debug_init_cache_set(struct cache_set *c)
{
	if (!IS_ERR_OR_NULL(debug)) {
		char name[50];
		snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);

		c->debug = debugfs_create_file(name, 0400, debug, c,
					       &cache_set_debug_ops);
	}
}

#endif

void bch_debug_exit(void)
{
	if (!IS_ERR_OR_NULL(debug))
		debugfs_remove_recursive(debug);
}

int __init bch_debug_init(struct kobject *kobj)
{
	int ret = 0;

	debug = debugfs_create_dir("bcache", NULL);
	return ret;
}