// SPDX-License-Identifier: GPL-2.0
/*
 * Assorted bcache debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

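/*
 * Root of the bcache debugfs hierarchy; bch_debug_init_cache_set() creates
 * one per-cache-set dump file underneath it.
 */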
struct dentry *bcache_debug;

#ifdef CONFIG_BCACHE_DEBUG

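/*
 * Walk the bsets in an on-disk btree node image: start at @start and
 * advance one bset at a time until we either run past the end of the node
 * (KEY_SIZE(&b->key) sectors) or hit a bset whose sequence number no
 * longer matches the first one, i.e. one that was never written as part
 * of this node.
 */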
#define for_each_written_bset(b, start, i)				\
	for (i = (start);						\
	     (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
	     i->seq == (start)->seq;					\
	     i = (void *) i + set_blocks(i, block_bytes(b->c)) *	\
		 block_bytes(b->c))

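/*
 * Re-read a btree node from disk and compare it against the in-memory
 * version: the two sets of keys must match once both are sorted.  On any
 * mismatch, dump the in-memory, read-back and raw on-disk bsets to the
 * console and panic.  Only does anything if the cache set was registered
 * with verify enabled and the verify buffers were allocated.
 */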
void bch_btree_verify(struct btree *b)
{
	struct btree *v = b->c->verify_data;
	struct bset *ondisk, *sorted, *inmemory;
	struct bio *bio;

	if (!b->c->verify || !b->c->verify_ondisk)
		return;

	/*
	 * Block I/O on this node, and serialize use of the shared
	 * verify_data/verify_ondisk buffers against other verify calls.
	 */
	down(&b->io_mutex);
	mutex_lock(&b->c->verify_lock);

	ondisk = b->c->verify_ondisk;
	sorted = b->c->verify_data->keys.set->data;
	inmemory = b->keys.set->data;

	bkey_copy(&v->key, &b->key);
	v->written = 0;
	v->level = b->level;
	v->keys.ops = b->keys.ops;

	/* Synchronously re-read the node from the cache device */
	bio = bch_bbio_alloc(b->c);
	bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev);
	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_iter.bi_size	= KEY_SIZE(&v->key) << 9;
	bio->bi_opf		= REQ_OP_READ | REQ_META;
	bch_bio_map(bio, sorted);

	submit_bio_wait(bio);
	bch_bbio_free(bio, b->c);

	/* Save the raw on-disk image before read_done() sorts it */
	memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);

	bch_btree_node_read_done(v);
	sorted = v->keys.set->data;

	if (inmemory->keys != sorted->keys ||
	    memcmp(inmemory->start,
		   sorted->start,
		   (void *) bset_bkey_last(inmemory) -
		   (void *) inmemory->start)) {
		struct bset *i;
		unsigned int j;

		console_lock();

		pr_err("*** in memory:\n");
		bch_dump_bset(&b->keys, inmemory, 0);

		pr_err("*** read back in:\n");
		bch_dump_bset(&v->keys, sorted, 0);

		for_each_written_bset(b, ondisk, i) {
			unsigned int block = ((void *) i - (void *) ondisk) /
				block_bytes(b->c);

			pr_err("*** on disk block %u:\n", block);
			bch_dump_bset(&b->keys, i, block);
		}

		pr_err("*** block %zu not written\n",
		       ((void *) i - (void *) ondisk) / block_bytes(b->c));

		/* Find the first key that differs, for the panic message */
		for (j = 0; j < inmemory->keys; j++)
			if (inmemory->d[j] != sorted->d[j])
				break;

		pr_err("b->written %u\n", b->written);

		console_unlock();
		panic("verify failed at %u\n", j);
	}

	mutex_unlock(&b->c->verify_lock);
	up(&b->io_mutex);
}

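/*
 * Verify a completed read against the backing device: allocate a second
 * bio pointed at the same disk range as @bio, read it synchronously, and
 * memcmp() the two segment by segment, flagging any mismatch via
 * cache_set_err_on().  The data in @bio must already be valid.
 */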
void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
	struct bio *check;
	struct bio_vec bv, cbv;
	struct bvec_iter iter, citer = { 0 };

	check = bio_kmalloc(GFP_NOIO, bio_segments(bio));
	if (!check)
		return;
	check->bi_disk = bio->bi_disk;
	check->bi_opf = REQ_OP_READ;
	check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
	check->bi_iter.bi_size = bio->bi_iter.bi_size;

	bch_bio_map(check, NULL);
	if (bch_bio_alloc_pages(check, GFP_NOIO))
		goto out_put;

	submit_bio_wait(check);

	/*
	 * Walk check's segments in lockstep with bio's; citer only needs a
	 * bi_size large enough that it never terminates the walk early.
	 */
	citer.bi_size = UINT_MAX;
	bio_for_each_segment(bv, bio, iter) {
		void *p1 = kmap_atomic(bv.bv_page);
		void *p2;

		cbv = bio_iter_iovec(check, citer);
		p2 = page_address(cbv.bv_page);

		cache_set_err_on(memcmp(p1 + bv.bv_offset,
					p2 + bv.bv_offset,
					bv.bv_len),
				 dc->disk.c,
				 "verify failed at dev %s sector %llu",
				 dc->backing_dev_name,
				 (uint64_t) bio->bi_iter.bi_sector);

		kunmap_atomic(p1);
		bio_advance_iter(check, &citer, bv.bv_len);
	}

	bio_free_pages(check);
out_put:
	bio_put(check);
}

#endif

#ifdef CONFIG_DEBUG_FS

/* XXX: cache set refcounting */

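/*
 * State for one open instance of the dump file: a PAGE_SIZE staging
 * buffer of formatted text, how much of it is still unread, and a keybuf
 * tracking where the btree scan left off.
 */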
struct dump_iterator {
	char			buf[PAGE_SIZE];
	size_t			bytes;
	struct cache_set	*c;
	struct keybuf		keys;
};

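/* Match every key: the dump covers the entire btree. */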
static bool dump_pred(struct keybuf *buf, struct bkey *k)
{
	return true;
}

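/*
 * Copy out whatever formatted text is already buffered, then refill the
 * buffer one extent at a time via bch_keybuf_next_rescan() until the user
 * buffer is full or the btree scan is exhausted.
 */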
static ssize_t bch_dump_read(struct file *file, char __user *buf,
			     size_t size, loff_t *ppos)
{
	struct dump_iterator *i = file->private_data;
	ssize_t ret = 0;
	char kbuf[80];

	while (size) {
		struct keybuf_key *w;
		unsigned int bytes = min(i->bytes, size);

		if (copy_to_user(buf, i->buf, bytes))
			return -EFAULT;

		ret += bytes;
		buf += bytes;
		size -= bytes;
		i->bytes -= bytes;
		memmove(i->buf, i->buf + bytes, i->bytes);

		/* Leftover text means the user buffer is full */
		if (i->bytes)
			break;

		w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred);
		if (!w)
			break;

		bch_extent_to_text(kbuf, sizeof(kbuf), &w->key);
		i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
		bch_keybuf_del(&i->keys, w);
	}

	return ret;
}

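/*
 * Each open gets its own iterator, with the scan positioned at the start
 * of the keyspace.
 */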
static int bch_dump_open(struct inode *inode, struct file *file)
{
	struct cache_set *c = inode->i_private;
	struct dump_iterator *i;

	i = kzalloc(sizeof(struct dump_iterator), GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	file->private_data = i;
	i->c = c;
	bch_keybuf_init(&i->keys);
	i->keys.last_scanned = KEY(0, 0, 0);

	return 0;
}

static int bch_dump_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations cache_set_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch_dump_open,
	.read		= bch_dump_read,
	.release	= bch_dump_release
};

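/*
 * Create the per-cache-set dump file.  With debugfs mounted at its
 * conventional location, the btree can then be dumped with e.g.:
 *
 *	cat /sys/kernel/debug/bcache/bcache-<set UUID>
 *
 * (path assuming the usual /sys/kernel/debug mount point).
 */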
void bch_debug_init_cache_set(struct cache_set *c)
{
	if (!IS_ERR_OR_NULL(bcache_debug)) {
		char name[50];

		snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
		c->debug = debugfs_create_file(name, 0400, bcache_debug, c,
					       &cache_set_debug_ops);
	}
}

#endif

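/* Tear down the whole debugfs hierarchy, including per-cache-set files. */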
void bch_debug_exit(void)
{
	debugfs_remove_recursive(bcache_debug);
}

void __init bch_debug_init(void)
{
	/*
	 * The return value of debugfs_create_dir() is deliberately not
	 * checked: the debugfs API copes with an ERR_PTR or NULL dentry
	 * in subsequent calls, so callers need not care whether creation
	 * succeeded.
	 */
	bcache_debug = debugfs_create_dir("bcache", NULL);
}