// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"

struct xstats xfsstats;

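/*
 * Sum the idx'th 32-bit counter in the per-cpu stats structure across all
 * possible CPUs.  The structure is addressed as a flat array of __u32s.
 */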
static int counter_val(struct xfsstats __percpu *stats, int idx)
{
	int val = 0, cpu;

	for_each_possible_cpu(cpu)
		val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
	return val;
}

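/*
 * Format the aggregated statistics into buf: one line per counter group
 * ("extent_alloc", "abt", ..., "qm"), followed by the 64-bit byte counters
 * on the "xpc" line, the "defer_relog" count and a "debug 0/1" flag line.
 * The buffer is assumed to be at least PATH_MAX bytes; the number of bytes
 * written is returned.
 */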
int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{
	int i, j;
	int len = 0;
	uint64_t xs_xstrat_bytes = 0;
	uint64_t xs_write_bytes = 0;
	uint64_t xs_read_bytes = 0;
	uint64_t defer_relog = 0;

	static const struct xstats_entry {
		char *desc;
		int endpoint;
	} xstats[] = {
		{ "extent_alloc",	xfsstats_offset(xs_abt_lookup)	},
		{ "abt",		xfsstats_offset(xs_blk_mapr)	},
		{ "blk_map",		xfsstats_offset(xs_bmbt_lookup)	},
		{ "bmbt",		xfsstats_offset(xs_dir_lookup)	},
		{ "dir",		xfsstats_offset(xs_trans_sync)	},
		{ "trans",		xfsstats_offset(xs_ig_attempts)	},
		{ "ig",			xfsstats_offset(xs_log_writes)	},
		{ "log",		xfsstats_offset(xs_try_logspace)},
		{ "push_ail",		xfsstats_offset(xs_xstrat_quick)},
		{ "xstrat",		xfsstats_offset(xs_write_calls)	},
		{ "rw",			xfsstats_offset(xs_attr_get)	},
		{ "attr",		xfsstats_offset(xs_iflush_count)},
		{ "icluster",		xfsstats_offset(vn_active)	},
		{ "vnodes",		xfsstats_offset(xb_get)		},
		{ "buf",		xfsstats_offset(xs_abtb_2)	},
		{ "abtb2",		xfsstats_offset(xs_abtc_2)	},
		{ "abtc2",		xfsstats_offset(xs_bmbt_2)	},
		{ "bmbt2",		xfsstats_offset(xs_ibt_2)	},
		{ "ibt2",		xfsstats_offset(xs_fibt_2)	},
		{ "fibt2",		xfsstats_offset(xs_rmap_2)	},
		{ "rmapbt",		xfsstats_offset(xs_refcbt_2)	},
		{ "refcntbt",		xfsstats_offset(xs_qm_dqreclaims)},
		/* we print both series of quota information together */
		{ "qm",			xfsstats_offset(xs_xstrat_bytes)},
	};

	/* Loop over all stats groups */

	for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
		len += scnprintf(buf + len, PATH_MAX - len, "%s",
				xstats[i].desc);
		/* inner loop does each group */
		for (; j < xstats[i].endpoint; j++)
			len += scnprintf(buf + len, PATH_MAX - len, " %u",
					counter_val(stats, j));
		len += scnprintf(buf + len, PATH_MAX - len, "\n");
	}
	/* extra precision counters */
	for_each_possible_cpu(i) {
		xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;
		xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
		xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
		defer_relog += per_cpu_ptr(stats, i)->s.defer_relog;
	}

	len += scnprintf(buf + len, PATH_MAX - len, "xpc %llu %llu %llu\n",
			xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
	len += scnprintf(buf + len, PATH_MAX - len, "defer_relog %llu\n",
			defer_relog);
	len += scnprintf(buf + len, PATH_MAX - len, "debug %u\n",
#if defined(DEBUG)
		1);
#else
		0);
#endif

	return len;
}

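/*
 * Reset all statistics counters on every possible CPU, preserving only
 * vn_active, which tracks currently active vnodes and must survive a
 * stats clear.
 */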
void xfs_stats_clearall(struct xfsstats __percpu *stats)
{
	int c;
	uint32_t vn_active;

	xfs_notice(NULL, "Clearing xfsstats");
	for_each_possible_cpu(c) {
		preempt_disable();
		/* save vn_active, it's a universal truth! */
		vn_active = per_cpu_ptr(stats, c)->s.vn_active;
		memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
		per_cpu_ptr(stats, c)->s.vn_active = vn_active;
		preempt_enable();
	}
}

#ifdef CONFIG_PROC_FS
/* legacy quota interfaces */
#ifdef CONFIG_XFS_QUOTA

#define XFSSTAT_START_XQMSTAT	xfsstats_offset(xs_qm_dqreclaims)
#define XFSSTAT_END_XQMSTAT	xfsstats_offset(xs_qm_dquot)

static int xqm_proc_show(struct seq_file *m, void *v)
{
	/* maximum; incore; ratio free to inuse; freelist */
	seq_printf(m, "%d\t%d\t%d\t%u\n",
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT),
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT + 1));
	return 0;
}

/* legacy quota stats interface no 2 */
static int xqmstat_proc_show(struct seq_file *m, void *v)
{
	int j;

	seq_puts(m, "qm");
	for (j = XFSSTAT_START_XQMSTAT; j < XFSSTAT_END_XQMSTAT; j++)
		seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
	seq_putc(m, '\n');
	return 0;
}
#endif /* CONFIG_XFS_QUOTA */

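/*
 * Create the legacy /proc/fs/xfs entries: "stat" is a symlink to the sysfs
 * statistics file, and the quota files are only created when
 * CONFIG_XFS_QUOTA is enabled.  On any failure the whole subtree is torn
 * down again.
 */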
int
xfs_init_procfs(void)
{
	if (!proc_mkdir("fs/xfs", NULL))
		return -ENOMEM;

	if (!proc_symlink("fs/xfs/stat", NULL,
			  "/sys/fs/xfs/stats/stats"))
		goto out;

#ifdef CONFIG_XFS_QUOTA
	if (!proc_create_single("fs/xfs/xqmstat", 0, NULL, xqmstat_proc_show))
		goto out;
	if (!proc_create_single("fs/xfs/xqm", 0, NULL, xqm_proc_show))
		goto out;
#endif
	return 0;

out:
	remove_proc_subtree("fs/xfs", NULL);
	return -ENOMEM;
}

void
xfs_cleanup_procfs(void)
{
	remove_proc_subtree("fs/xfs", NULL);
}
#endif /* CONFIG_PROC_FS */