Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (C) 2023 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <djwong@kernel.org>
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_trans_resv.h"
11#include "xfs_mount.h"
12#include "xfs_sysfs.h"
13#include "xfs_btree.h"
14#include "xfs_super.h"
15#include "scrub/scrub.h"
16#include "scrub/stats.h"
17#include "scrub/trace.h"
18
/*
 * Per-scrub-type statistics bucket.  The field layout is a contract:
 * all 32-bit counters come first, then the 64-bit counters, and all
 * non-counter state goes last so that xchk_stats_clearall() can memset
 * everything up to css_lock in one shot.
 */
struct xchk_scrub_stats {
	/* all 32-bit counters here */

	/* checking stats */
	uint32_t		invocations;
	uint32_t		clean;
	uint32_t		corrupt;
	uint32_t		preen;
	uint32_t		xfail;
	uint32_t		xcorrupt;
	uint32_t		incomplete;
	uint32_t		warning;
	uint32_t		retries;

	/* repair stats */
	uint32_t		repair_invocations;
	uint32_t		repair_success;

	/* all 64-bit items here */

	/* runtimes, in microseconds */
	uint64_t		checktime_us;
	uint64_t		repairtime_us;

	/* non-counter state must go at the end for clearall */
	spinlock_t		css_lock;
};
46
/* A stats object: one bucket per scrub type, plus its debugfs hookup. */
struct xchk_stats {
	struct dentry		*cs_debugfs;	/* "scrub" debugfs dir, if any */
	struct xchk_scrub_stats	cs_stats[XFS_SCRUB_TYPE_NR];
};
51
52
53static struct xchk_stats global_stats;
54
/*
 * Row label for each scrub type in the stats report.  Types with no
 * entry (NULL) are skipped by the formatter and the size estimator.
 */
static const char *name_map[XFS_SCRUB_TYPE_NR] = {
	[XFS_SCRUB_TYPE_SB]		= "sb",
	[XFS_SCRUB_TYPE_AGF]		= "agf",
	[XFS_SCRUB_TYPE_AGFL]		= "agfl",
	[XFS_SCRUB_TYPE_AGI]		= "agi",
	[XFS_SCRUB_TYPE_BNOBT]		= "bnobt",
	[XFS_SCRUB_TYPE_CNTBT]		= "cntbt",
	[XFS_SCRUB_TYPE_INOBT]		= "inobt",
	[XFS_SCRUB_TYPE_FINOBT]	= "finobt",
	[XFS_SCRUB_TYPE_RMAPBT]	= "rmapbt",
	[XFS_SCRUB_TYPE_REFCNTBT]	= "refcountbt",
	[XFS_SCRUB_TYPE_INODE]		= "inode",
	[XFS_SCRUB_TYPE_BMBTD]		= "bmapbtd",
	[XFS_SCRUB_TYPE_BMBTA]		= "bmapbta",
	[XFS_SCRUB_TYPE_BMBTC]		= "bmapbtc",
	[XFS_SCRUB_TYPE_DIR]		= "directory",
	[XFS_SCRUB_TYPE_XATTR]		= "xattr",
	[XFS_SCRUB_TYPE_SYMLINK]	= "symlink",
	[XFS_SCRUB_TYPE_PARENT]	= "parent",
	[XFS_SCRUB_TYPE_RTBITMAP]	= "rtbitmap",
	[XFS_SCRUB_TYPE_RTSUM]		= "rtsummary",
	[XFS_SCRUB_TYPE_UQUOTA]	= "usrquota",
	[XFS_SCRUB_TYPE_GQUOTA]	= "grpquota",
	[XFS_SCRUB_TYPE_PQUOTA]	= "prjquota",
	[XFS_SCRUB_TYPE_FSCOUNTERS]	= "fscounters",
	[XFS_SCRUB_TYPE_QUOTACHECK]	= "quotacheck",
	[XFS_SCRUB_TYPE_NLINKS]	= "nlinks",
};
83
/*
 * Format the scrub stats into a text buffer, similar to pcp style:
 * one line per named scrub type, name first, then the counters in
 * declaration order.  Returns bytes written, or the last scnprintf
 * result if nothing could be written.
 */
STATIC ssize_t
xchk_stats_format(
	struct xchk_stats	*cs,
	char			*buf,
	size_t			remaining)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;
	ssize_t			copied = 0;
	int			ret = 0;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		/* Unnamed types are not reported. */
		if (!name_map[i])
			continue;

		/*
		 * NOTE: counters are read without css_lock, so a row may mix
		 * values from a concurrent update — acceptable for a report.
		 */
		ret = scnprintf(buf, remaining,
				"%s %u %u %u %u %u %u %u %u %u %llu %u %u %llu\n",
				name_map[i],
				(unsigned int)css->invocations,
				(unsigned int)css->clean,
				(unsigned int)css->corrupt,
				(unsigned int)css->preen,
				(unsigned int)css->xfail,
				(unsigned int)css->xcorrupt,
				(unsigned int)css->incomplete,
				(unsigned int)css->warning,
				(unsigned int)css->retries,
				(unsigned long long)css->checktime_us,
				(unsigned int)css->repair_invocations,
				(unsigned int)css->repair_success,
				(unsigned long long)css->repairtime_us);
		/* Stop once the buffer is full or formatting fails. */
		if (ret <= 0)
			break;

		remaining -= ret;
		copied += ret;
		buf += ret;
	}

	return copied > 0 ? copied : ret;
}
126
127/* Estimate the worst case buffer size required to hold the whole report. */
128STATIC size_t
129xchk_stats_estimate_bufsize(
130 struct xchk_stats *cs)
131{
132 struct xchk_scrub_stats *css = &cs->cs_stats[0];
133 unsigned int i;
134 size_t field_width;
135 size_t ret = 0;
136
137 /* 4294967296 plus one space for each u32 field */
138 field_width = 11 * (offsetof(struct xchk_scrub_stats, checktime_us) /
139 sizeof(uint32_t));
140
141 /* 18446744073709551615 plus one space for each u64 field */
142 field_width += 21 * ((offsetof(struct xchk_scrub_stats, css_lock) -
143 offsetof(struct xchk_scrub_stats, checktime_us)) /
144 sizeof(uint64_t));
145
146 for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
147 if (!name_map[i])
148 continue;
149
150 /* name plus one space */
151 ret += 1 + strlen(name_map[i]);
152
153 /* all fields, plus newline */
154 ret += field_width + 1;
155 }
156
157 return ret;
158}
159
160/* Clear all counters. */
161STATIC void
162xchk_stats_clearall(
163 struct xchk_stats *cs)
164{
165 struct xchk_scrub_stats *css = &cs->cs_stats[0];
166 unsigned int i;
167
168 for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
169 spin_lock(&css->css_lock);
170 memset(css, 0, offsetof(struct xchk_scrub_stats, css_lock));
171 spin_unlock(&css->css_lock);
172 }
173}
174
/* Union of all output flags meaning the metadata was not fully clean. */
#define XFS_SCRUB_OFLAG_UNCLEAN	(XFS_SCRUB_OFLAG_CORRUPT | \
				 XFS_SCRUB_OFLAG_PREEN | \
				 XFS_SCRUB_OFLAG_XFAIL | \
				 XFS_SCRUB_OFLAG_XCORRUPT | \
				 XFS_SCRUB_OFLAG_INCOMPLETE | \
				 XFS_SCRUB_OFLAG_WARNING)
181
/*
 * Fold the outcome of a single scrub run into the per-type bucket
 * selected by sm->sm_type.  All updates happen under the bucket's
 * spinlock so they do not race with clearall or concurrent merges.
 */
STATIC void
xchk_stats_merge_one(
	struct xchk_stats		*cs,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	struct xchk_scrub_stats		*css;

	/* Refuse out-of-range types rather than indexing off the array. */
	if (sm->sm_type >= XFS_SCRUB_TYPE_NR) {
		ASSERT(sm->sm_type < XFS_SCRUB_TYPE_NR);
		return;
	}

	css = &cs->cs_stats[sm->sm_type];
	spin_lock(&css->css_lock);
	css->invocations++;
	/* A run with none of the "unclean" output flags counts as clean. */
	if (!(sm->sm_flags & XFS_SCRUB_OFLAG_UNCLEAN))
		css->clean++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		css->corrupt++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_PREEN)
		css->preen++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_XFAIL)
		css->xfail++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_XCORRUPT)
		css->xcorrupt++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		css->incomplete++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_WARNING)
		css->warning++;
	css->retries += run->retries;
	/* Convert nanosecond runtimes to microseconds, rounding up. */
	css->checktime_us += howmany_64(run->scrub_ns, NSEC_PER_USEC);

	if (run->repair_attempted)
		css->repair_invocations++;
	if (run->repair_succeeded)
		css->repair_success++;
	css->repairtime_us += howmany_64(run->repair_ns, NSEC_PER_USEC);
	spin_unlock(&css->css_lock);
}
222
/*
 * Merge these scrub-run stats into the global and mount stat data.
 * Every run is counted twice: once module-wide, once per-mount.
 */
void
xchk_stats_merge(
	struct xfs_mount		*mp,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	xchk_stats_merge_one(&global_stats, sm, run);
	xchk_stats_merge_one(mp->m_scrub_stats, sm, run);
}
233
234/* debugfs boilerplate */
235
/* Dump the whole stats report to userspace in a single read call. */
static ssize_t
xchk_scrub_stats_read(
	struct file		*file,
	char __user		*ubuf,
	size_t			count,
	loff_t			*ppos)
{
	struct xchk_stats	*cs = file->private_data;
	char			*buf;
	size_t			bufsize;
	ssize_t			avail, ret;

	/*
	 * This generates a stringly-typed snapshot of all the scrub counters,
	 * so we do not want userspace to receive garbled text from multiple
	 * calls.  If the file position is greater than 0, return a short read.
	 */
	if (*ppos > 0)
		return 0;

	/* Size the buffer for the worst-case report, freshly each read. */
	bufsize = xchk_stats_estimate_bufsize(cs);

	buf = kvmalloc(bufsize, XCHK_GFP_FLAGS);
	if (!buf)
		return -ENOMEM;

	avail = xchk_stats_format(cs, buf, bufsize);
	if (avail < 0) {
		ret = avail;
		goto out;
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, avail);
out:
	kvfree(buf);
	return ret;
}
273
/* Read-only "stats" file: dump the counters as text. */
static const struct file_operations scrub_stats_fops = {
	.open			= simple_open,
	.read			= xchk_scrub_stats_read,
};
278
279static ssize_t
280xchk_clear_scrub_stats_write(
281 struct file *file,
282 const char __user *ubuf,
283 size_t count,
284 loff_t *ppos)
285{
286 struct xchk_stats *cs = file->private_data;
287 unsigned int val;
288 int ret;
289
290 ret = kstrtouint_from_user(ubuf, count, 0, &val);
291 if (ret)
292 return ret;
293
294 if (val != 1)
295 return -EINVAL;
296
297 xchk_stats_clearall(cs);
298 return count;
299}
300
/* Write-only "clear_stats" file: reset all counters. */
static const struct file_operations clear_scrub_stats_fops = {
	.open			= simple_open,
	.write			= xchk_clear_scrub_stats_write,
};
305
306/* Initialize the stats object. */
307STATIC int
308xchk_stats_init(
309 struct xchk_stats *cs,
310 struct xfs_mount *mp)
311{
312 struct xchk_scrub_stats *css = &cs->cs_stats[0];
313 unsigned int i;
314
315 for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++)
316 spin_lock_init(&css->css_lock);
317
318 return 0;
319}
320
/*
 * Connect the stats object to debugfs.  If there is no parent dentry or
 * directory creation fails, the stats still work but are not exported.
 */
void
xchk_stats_register(
	struct xchk_stats	*cs,
	struct dentry		*parent)
{
	if (!parent)
		return;

	cs->cs_debugfs = xfs_debugfs_mkdir("scrub", parent);
	if (!cs->cs_debugfs)
		return;

	/* 0444: world-readable report */
	debugfs_create_file("stats", 0444, cs->cs_debugfs, cs,
			&scrub_stats_fops);
	/* 0200: owner-writable reset knob */
	debugfs_create_file("clear_stats", 0200, cs->cs_debugfs, cs,
			&clear_scrub_stats_fops);
}
339
/*
 * Free all resources related to the stats object.  Nothing to release
 * today; kept as a hook paired with xchk_stats_init.
 */
STATIC int
xchk_stats_teardown(
	struct xchk_stats	*cs)
{
	return 0;
}
347
/*
 * Disconnect the stats object from debugfs.  debugfs_remove ignores a
 * NULL dentry, so this is safe even if registration never happened.
 */
void
xchk_stats_unregister(
	struct xchk_stats	*cs)
{
	debugfs_remove(cs->cs_debugfs);
}
355
356/* Initialize global stats and register them */
357int __init
358xchk_global_stats_setup(
359 struct dentry *parent)
360{
361 int error;
362
363 error = xchk_stats_init(&global_stats, NULL);
364 if (error)
365 return error;
366
367 xchk_stats_register(&global_stats, parent);
368 return 0;
369}
370
/*
 * Unregister global stats and tear them down.  Remove the debugfs
 * entries first so userspace cannot reach the object during teardown.
 */
void
xchk_global_stats_teardown(void)
{
	xchk_stats_unregister(&global_stats);
	xchk_stats_teardown(&global_stats);
}
378
379/* Allocate per-mount stats */
380int
381xchk_mount_stats_alloc(
382 struct xfs_mount *mp)
383{
384 struct xchk_stats *cs;
385 int error;
386
387 cs = kvzalloc(sizeof(struct xchk_stats), GFP_KERNEL);
388 if (!cs)
389 return -ENOMEM;
390
391 error = xchk_stats_init(cs, mp);
392 if (error)
393 goto out_free;
394
395 mp->m_scrub_stats = cs;
396 return 0;
397out_free:
398 kvfree(cs);
399 return error;
400}
401
/* Tear down and free the per-mount stats object. */
void
xchk_mount_stats_free(
	struct xfs_mount	*mp)
{
	xchk_stats_teardown(mp->m_scrub_stats);
	kvfree(mp->m_scrub_stats);
	/* Clear the pointer so nothing trips over the freed object. */
	mp->m_scrub_stats = NULL;
}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (C) 2023 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <djwong@kernel.org>
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_trans_resv.h"
11#include "xfs_mount.h"
12#include "xfs_sysfs.h"
13#include "xfs_btree.h"
14#include "xfs_super.h"
15#include "scrub/scrub.h"
16#include "scrub/stats.h"
17#include "scrub/trace.h"
18
/*
 * Per-scrub-type statistics bucket.  The field layout is a contract:
 * all 32-bit counters come first, then the 64-bit counters, and all
 * non-counter state goes last so that xchk_stats_clearall() can memset
 * everything up to css_lock in one shot.
 */
struct xchk_scrub_stats {
	/* all 32-bit counters here */

	/* checking stats */
	uint32_t		invocations;
	uint32_t		clean;
	uint32_t		corrupt;
	uint32_t		preen;
	uint32_t		xfail;
	uint32_t		xcorrupt;
	uint32_t		incomplete;
	uint32_t		warning;
	uint32_t		retries;

	/* repair stats */
	uint32_t		repair_invocations;
	uint32_t		repair_success;

	/* all 64-bit items here */

	/* runtimes, in microseconds */
	uint64_t		checktime_us;
	uint64_t		repairtime_us;

	/* non-counter state must go at the end for clearall */
	spinlock_t		css_lock;
};
46
/* A stats object: one bucket per scrub type, plus its debugfs hookup. */
struct xchk_stats {
	struct dentry		*cs_debugfs;	/* "scrub" debugfs dir, if any */
	struct xchk_scrub_stats	cs_stats[XFS_SCRUB_TYPE_NR];
};
51
52
53static struct xchk_stats global_stats;
54
/*
 * Row label for each scrub type in the stats report.  Types with no
 * entry (NULL) are skipped by the formatter and the size estimator.
 */
static const char *name_map[XFS_SCRUB_TYPE_NR] = {
	[XFS_SCRUB_TYPE_SB]		= "sb",
	[XFS_SCRUB_TYPE_AGF]		= "agf",
	[XFS_SCRUB_TYPE_AGFL]		= "agfl",
	[XFS_SCRUB_TYPE_AGI]		= "agi",
	[XFS_SCRUB_TYPE_BNOBT]		= "bnobt",
	[XFS_SCRUB_TYPE_CNTBT]		= "cntbt",
	[XFS_SCRUB_TYPE_INOBT]		= "inobt",
	[XFS_SCRUB_TYPE_FINOBT]	= "finobt",
	[XFS_SCRUB_TYPE_RMAPBT]	= "rmapbt",
	[XFS_SCRUB_TYPE_REFCNTBT]	= "refcountbt",
	[XFS_SCRUB_TYPE_INODE]		= "inode",
	[XFS_SCRUB_TYPE_BMBTD]		= "bmapbtd",
	[XFS_SCRUB_TYPE_BMBTA]		= "bmapbta",
	[XFS_SCRUB_TYPE_BMBTC]		= "bmapbtc",
	[XFS_SCRUB_TYPE_DIR]		= "directory",
	[XFS_SCRUB_TYPE_XATTR]		= "xattr",
	[XFS_SCRUB_TYPE_SYMLINK]	= "symlink",
	[XFS_SCRUB_TYPE_PARENT]	= "parent",
	[XFS_SCRUB_TYPE_RTBITMAP]	= "rtbitmap",
	[XFS_SCRUB_TYPE_RTSUM]		= "rtsummary",
	[XFS_SCRUB_TYPE_UQUOTA]	= "usrquota",
	[XFS_SCRUB_TYPE_GQUOTA]	= "grpquota",
	[XFS_SCRUB_TYPE_PQUOTA]	= "prjquota",
	[XFS_SCRUB_TYPE_FSCOUNTERS]	= "fscounters",
	[XFS_SCRUB_TYPE_QUOTACHECK]	= "quotacheck",
	[XFS_SCRUB_TYPE_NLINKS]	= "nlinks",
	[XFS_SCRUB_TYPE_DIRTREE]	= "dirtree",
	[XFS_SCRUB_TYPE_METAPATH]	= "metapath",
	[XFS_SCRUB_TYPE_RGSUPER]	= "rgsuper",
};
86
/*
 * Format the scrub stats into a text buffer, similar to pcp style:
 * one line per named scrub type, name first, then the counters in
 * declaration order.  Returns bytes written, or the last scnprintf
 * result if nothing could be written.
 */
STATIC ssize_t
xchk_stats_format(
	struct xchk_stats	*cs,
	char			*buf,
	size_t			remaining)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;
	ssize_t			copied = 0;
	int			ret = 0;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		/* Unnamed types are not reported. */
		if (!name_map[i])
			continue;

		/*
		 * NOTE: counters are read without css_lock, so a row may mix
		 * values from a concurrent update — acceptable for a report.
		 */
		ret = scnprintf(buf, remaining,
				"%s %u %u %u %u %u %u %u %u %u %llu %u %u %llu\n",
				name_map[i],
				(unsigned int)css->invocations,
				(unsigned int)css->clean,
				(unsigned int)css->corrupt,
				(unsigned int)css->preen,
				(unsigned int)css->xfail,
				(unsigned int)css->xcorrupt,
				(unsigned int)css->incomplete,
				(unsigned int)css->warning,
				(unsigned int)css->retries,
				(unsigned long long)css->checktime_us,
				(unsigned int)css->repair_invocations,
				(unsigned int)css->repair_success,
				(unsigned long long)css->repairtime_us);
		/* Stop once the buffer is full or formatting fails. */
		if (ret <= 0)
			break;

		remaining -= ret;
		copied += ret;
		buf += ret;
	}

	return copied > 0 ? copied : ret;
}
129
130/* Estimate the worst case buffer size required to hold the whole report. */
131STATIC size_t
132xchk_stats_estimate_bufsize(
133 struct xchk_stats *cs)
134{
135 struct xchk_scrub_stats *css = &cs->cs_stats[0];
136 unsigned int i;
137 size_t field_width;
138 size_t ret = 0;
139
140 /* 4294967296 plus one space for each u32 field */
141 field_width = 11 * (offsetof(struct xchk_scrub_stats, checktime_us) /
142 sizeof(uint32_t));
143
144 /* 18446744073709551615 plus one space for each u64 field */
145 field_width += 21 * ((offsetof(struct xchk_scrub_stats, css_lock) -
146 offsetof(struct xchk_scrub_stats, checktime_us)) /
147 sizeof(uint64_t));
148
149 for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
150 if (!name_map[i])
151 continue;
152
153 /* name plus one space */
154 ret += 1 + strlen(name_map[i]);
155
156 /* all fields, plus newline */
157 ret += field_width + 1;
158 }
159
160 return ret;
161}
162
163/* Clear all counters. */
164STATIC void
165xchk_stats_clearall(
166 struct xchk_stats *cs)
167{
168 struct xchk_scrub_stats *css = &cs->cs_stats[0];
169 unsigned int i;
170
171 for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
172 spin_lock(&css->css_lock);
173 memset(css, 0, offsetof(struct xchk_scrub_stats, css_lock));
174 spin_unlock(&css->css_lock);
175 }
176}
177
/* Union of all output flags meaning the metadata was not fully clean. */
#define XFS_SCRUB_OFLAG_UNCLEAN	(XFS_SCRUB_OFLAG_CORRUPT | \
				 XFS_SCRUB_OFLAG_PREEN | \
				 XFS_SCRUB_OFLAG_XFAIL | \
				 XFS_SCRUB_OFLAG_XCORRUPT | \
				 XFS_SCRUB_OFLAG_INCOMPLETE | \
				 XFS_SCRUB_OFLAG_WARNING)
184
/*
 * Fold the outcome of a single scrub run into the per-type bucket
 * selected by sm->sm_type.  All updates happen under the bucket's
 * spinlock so they do not race with clearall or concurrent merges.
 */
STATIC void
xchk_stats_merge_one(
	struct xchk_stats		*cs,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	struct xchk_scrub_stats		*css;

	/* Refuse out-of-range types rather than indexing off the array. */
	if (sm->sm_type >= XFS_SCRUB_TYPE_NR) {
		ASSERT(sm->sm_type < XFS_SCRUB_TYPE_NR);
		return;
	}

	css = &cs->cs_stats[sm->sm_type];
	spin_lock(&css->css_lock);
	css->invocations++;
	/* A run with none of the "unclean" output flags counts as clean. */
	if (!(sm->sm_flags & XFS_SCRUB_OFLAG_UNCLEAN))
		css->clean++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		css->corrupt++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_PREEN)
		css->preen++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_XFAIL)
		css->xfail++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_XCORRUPT)
		css->xcorrupt++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		css->incomplete++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_WARNING)
		css->warning++;
	css->retries += run->retries;
	/* Convert nanosecond runtimes to microseconds, rounding up. */
	css->checktime_us += howmany_64(run->scrub_ns, NSEC_PER_USEC);

	if (run->repair_attempted)
		css->repair_invocations++;
	if (run->repair_succeeded)
		css->repair_success++;
	css->repairtime_us += howmany_64(run->repair_ns, NSEC_PER_USEC);
	spin_unlock(&css->css_lock);
}
225
/*
 * Merge these scrub-run stats into the global and mount stat data.
 * Every run is counted twice: once module-wide, once per-mount.
 */
void
xchk_stats_merge(
	struct xfs_mount		*mp,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	xchk_stats_merge_one(&global_stats, sm, run);
	xchk_stats_merge_one(mp->m_scrub_stats, sm, run);
}
236
237/* debugfs boilerplate */
238
/* Dump the whole stats report to userspace in a single read call. */
static ssize_t
xchk_scrub_stats_read(
	struct file		*file,
	char __user		*ubuf,
	size_t			count,
	loff_t			*ppos)
{
	struct xchk_stats	*cs = file->private_data;
	char			*buf;
	size_t			bufsize;
	ssize_t			avail, ret;

	/*
	 * This generates a stringly-typed snapshot of all the scrub counters,
	 * so we do not want userspace to receive garbled text from multiple
	 * calls.  If the file position is greater than 0, return a short read.
	 */
	if (*ppos > 0)
		return 0;

	/* Size the buffer for the worst-case report, freshly each read. */
	bufsize = xchk_stats_estimate_bufsize(cs);

	buf = kvmalloc(bufsize, XCHK_GFP_FLAGS);
	if (!buf)
		return -ENOMEM;

	avail = xchk_stats_format(cs, buf, bufsize);
	if (avail < 0) {
		ret = avail;
		goto out;
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, avail);
out:
	kvfree(buf);
	return ret;
}
276
/* Read-only "stats" file: dump the counters as text. */
static const struct file_operations scrub_stats_fops = {
	.open			= simple_open,
	.read			= xchk_scrub_stats_read,
};
281
282static ssize_t
283xchk_clear_scrub_stats_write(
284 struct file *file,
285 const char __user *ubuf,
286 size_t count,
287 loff_t *ppos)
288{
289 struct xchk_stats *cs = file->private_data;
290 unsigned int val;
291 int ret;
292
293 ret = kstrtouint_from_user(ubuf, count, 0, &val);
294 if (ret)
295 return ret;
296
297 if (val != 1)
298 return -EINVAL;
299
300 xchk_stats_clearall(cs);
301 return count;
302}
303
/* Write-only "clear_stats" file: reset all counters. */
static const struct file_operations clear_scrub_stats_fops = {
	.open			= simple_open,
	.write			= xchk_clear_scrub_stats_write,
};
308
309/* Initialize the stats object. */
310STATIC int
311xchk_stats_init(
312 struct xchk_stats *cs,
313 struct xfs_mount *mp)
314{
315 struct xchk_scrub_stats *css = &cs->cs_stats[0];
316 unsigned int i;
317
318 for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++)
319 spin_lock_init(&css->css_lock);
320
321 return 0;
322}
323
/*
 * Connect the stats object to debugfs.  If there is no parent dentry or
 * directory creation fails, the stats still work but are not exported.
 */
void
xchk_stats_register(
	struct xchk_stats	*cs,
	struct dentry		*parent)
{
	if (!parent)
		return;

	cs->cs_debugfs = xfs_debugfs_mkdir("scrub", parent);
	if (!cs->cs_debugfs)
		return;

	/* 0444: world-readable report */
	debugfs_create_file("stats", 0444, cs->cs_debugfs, cs,
			&scrub_stats_fops);
	/* 0200: owner-writable reset knob */
	debugfs_create_file("clear_stats", 0200, cs->cs_debugfs, cs,
			&clear_scrub_stats_fops);
}
342
/*
 * Free all resources related to the stats object.  Nothing to release
 * today; kept as a hook paired with xchk_stats_init.
 */
STATIC int
xchk_stats_teardown(
	struct xchk_stats	*cs)
{
	return 0;
}
350
/*
 * Disconnect the stats object from debugfs.  debugfs_remove ignores a
 * NULL dentry, so this is safe even if registration never happened.
 */
void
xchk_stats_unregister(
	struct xchk_stats	*cs)
{
	debugfs_remove(cs->cs_debugfs);
}
358
359/* Initialize global stats and register them */
360int __init
361xchk_global_stats_setup(
362 struct dentry *parent)
363{
364 int error;
365
366 error = xchk_stats_init(&global_stats, NULL);
367 if (error)
368 return error;
369
370 xchk_stats_register(&global_stats, parent);
371 return 0;
372}
373
/*
 * Unregister global stats and tear them down.  Remove the debugfs
 * entries first so userspace cannot reach the object during teardown.
 */
void
xchk_global_stats_teardown(void)
{
	xchk_stats_unregister(&global_stats);
	xchk_stats_teardown(&global_stats);
}
381
382/* Allocate per-mount stats */
383int
384xchk_mount_stats_alloc(
385 struct xfs_mount *mp)
386{
387 struct xchk_stats *cs;
388 int error;
389
390 cs = kvzalloc(sizeof(struct xchk_stats), GFP_KERNEL);
391 if (!cs)
392 return -ENOMEM;
393
394 error = xchk_stats_init(cs, mp);
395 if (error)
396 goto out_free;
397
398 mp->m_scrub_stats = cs;
399 return 0;
400out_free:
401 kvfree(cs);
402 return error;
403}
404
/* Tear down and free the per-mount stats object. */
void
xchk_mount_stats_free(
	struct xfs_mount	*mp)
{
	xchk_stats_teardown(mp->m_scrub_stats);
	kvfree(mp->m_scrub_stats);
	/* Clear the pointer so nothing trips over the freed object. */
	mp->m_scrub_stats = NULL;
}