v5.14.15
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"

struct xstats xfsstats;

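/*
 * Sum a single statistics counter across all possible CPUs.  @idx is
 * the counter's index, in __u32 words, from the start of struct
 * xfsstats.
 */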
static int counter_val(struct xfsstats __percpu *stats, int idx)
{
	int val = 0, cpu;

	for_each_possible_cpu(cpu)
		val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
	return val;
}

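/*
 * Render all statistics into @buf: one line per counter group, followed
 * by the 64-bit byte counters ("xpc"), the deferred-ops relog counter
 * and a debug flag.  Output is capped at PATH_MAX bytes; the return
 * value is the number of bytes written.
 */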
int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{
	int		i, j;
	int		len = 0;
	uint64_t	xs_xstrat_bytes = 0;
	uint64_t	xs_write_bytes = 0;
	uint64_t	xs_read_bytes = 0;
	uint64_t	defer_relog = 0;

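	/*
	 * Each entry names a counter group; endpoint is the __u32 offset
	 * of the first counter belonging to the next group, so the
	 * entries carve struct xfsstats into consecutive ranges.
	 */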
	static const struct xstats_entry {
		char	*desc;
		int	endpoint;
	} xstats[] = {
		{ "extent_alloc",	xfsstats_offset(xs_abt_lookup)	},
		{ "abt",		xfsstats_offset(xs_blk_mapr)	},
		{ "blk_map",		xfsstats_offset(xs_bmbt_lookup)	},
		{ "bmbt",		xfsstats_offset(xs_dir_lookup)	},
		{ "dir",		xfsstats_offset(xs_trans_sync)	},
		{ "trans",		xfsstats_offset(xs_ig_attempts)	},
		{ "ig",			xfsstats_offset(xs_log_writes)	},
		{ "log",		xfsstats_offset(xs_try_logspace)},
		{ "push_ail",		xfsstats_offset(xs_xstrat_quick)},
		{ "xstrat",		xfsstats_offset(xs_write_calls)	},
		{ "rw",			xfsstats_offset(xs_attr_get)	},
		{ "attr",		xfsstats_offset(xs_iflush_count)},
		{ "icluster",		xfsstats_offset(vn_active)	},
		{ "vnodes",		xfsstats_offset(xb_get)		},
		{ "buf",		xfsstats_offset(xs_abtb_2)	},
		{ "abtb2",		xfsstats_offset(xs_abtc_2)	},
		{ "abtc2",		xfsstats_offset(xs_bmbt_2)	},
		{ "bmbt2",		xfsstats_offset(xs_ibt_2)	},
		{ "ibt2",		xfsstats_offset(xs_fibt_2)	},
		{ "fibt2",		xfsstats_offset(xs_rmap_2)	},
		{ "rmapbt",		xfsstats_offset(xs_refcbt_2)	},
		{ "refcntbt",		xfsstats_offset(xs_qm_dqreclaims)},
		/* we print both series of quota information together */
		{ "qm",			xfsstats_offset(xs_xstrat_bytes)},
	};

	/* Loop over all stats groups */

	for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
		len += scnprintf(buf + len, PATH_MAX - len, "%s",
				xstats[i].desc);
		/* inner loop does each group */
		for (; j < xstats[i].endpoint; j++)
			len += scnprintf(buf + len, PATH_MAX - len, " %u",
					counter_val(stats, j));
		len += scnprintf(buf + len, PATH_MAX - len, "\n");
	}
	/* extra precision counters */
	for_each_possible_cpu(i) {
		xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;
		xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
		xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
		defer_relog += per_cpu_ptr(stats, i)->s.defer_relog;
	}

	len += scnprintf(buf + len, PATH_MAX-len, "xpc %Lu %Lu %Lu\n",
			xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
	len += scnprintf(buf + len, PATH_MAX-len, "defer_relog %llu\n",
			defer_relog);
	len += scnprintf(buf + len, PATH_MAX-len, "debug %u\n",
#if defined(DEBUG)
		1);
#else
		0);
#endif

	return len;
}

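/*
 * Reset every per-CPU copy of the statistics.  vn_active is a count of
 * currently-active vnodes, not a cumulative counter, so it is carried
 * across the reset.
 */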
void xfs_stats_clearall(struct xfsstats __percpu *stats)
{
	int		c;
	uint32_t	vn_active;

	xfs_notice(NULL, "Clearing xfsstats");
	for_each_possible_cpu(c) {
		preempt_disable();
		/* save vn_active, it's a universal truth! */
		vn_active = per_cpu_ptr(stats, c)->s.vn_active;
		memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
		per_cpu_ptr(stats, c)->s.vn_active = vn_active;
		preempt_enable();
	}
}

#ifdef CONFIG_PROC_FS
/* legacy quota interfaces */
#ifdef CONFIG_XFS_QUOTA

#define XFSSTAT_START_XQMSTAT xfsstats_offset(xs_qm_dqreclaims)
#define XFSSTAT_END_XQMSTAT xfsstats_offset(xs_qm_dquot)

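/*
 * /proc/fs/xfs/xqm: the "maximum" and "ratio" columns are hardwired to
 * zero and remain only for compatibility; the incore dquot and
 * freelist counts are still live.
 */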
static int xqm_proc_show(struct seq_file *m, void *v)
{
	/* maximum; incore; ratio free to inuse; freelist */
	seq_printf(m, "%d\t%d\t%d\t%u\n",
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT),
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT + 1));
	return 0;
}

/* legacy quota stats interface no 2 */
static int xqmstat_proc_show(struct seq_file *m, void *v)
{
	int j;

	seq_printf(m, "qm");
	for (j = XFSSTAT_START_XQMSTAT; j < XFSSTAT_END_XQMSTAT; j++)
		seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
	seq_putc(m, '\n');
	return 0;
}
#endif /* CONFIG_XFS_QUOTA */

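/*
 * Create the legacy /proc/fs/xfs entries.  The old stat file is now a
 * symlink to the sysfs statistics file; any failure tears the whole
 * subtree back down.
 */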
int
xfs_init_procfs(void)
{
	if (!proc_mkdir("fs/xfs", NULL))
		return -ENOMEM;

	if (!proc_symlink("fs/xfs/stat", NULL,
			  "/sys/fs/xfs/stats/stats"))
		goto out;

#ifdef CONFIG_XFS_QUOTA
	if (!proc_create_single("fs/xfs/xqmstat", 0, NULL, xqmstat_proc_show))
		goto out;
	if (!proc_create_single("fs/xfs/xqm", 0, NULL, xqm_proc_show))
		goto out;
#endif
	return 0;

out:
	remove_proc_subtree("fs/xfs", NULL);
	return -ENOMEM;
}

void
xfs_cleanup_procfs(void)
{
	remove_proc_subtree("fs/xfs", NULL);
}
#endif /* CONFIG_PROC_FS */