fs/f2fs/debug.c (v4.6)
  1/*
  2 * f2fs debugging statistics
  3 *
  4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  5 *             http://www.samsung.com/
  6 * Copyright (c) 2012 Linux Foundation
  7 * Copyright (c) 2012 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  8 *
  9 * This program is free software; you can redistribute it and/or modify
 10 * it under the terms of the GNU General Public License version 2 as
 11 * published by the Free Software Foundation.
 12 */
 13
 14#include <linux/fs.h>
 15#include <linux/backing-dev.h>
 16#include <linux/f2fs_fs.h>
 17#include <linux/blkdev.h>
 18#include <linux/debugfs.h>
 19#include <linux/seq_file.h>
 20
 21#include "f2fs.h"
 22#include "node.h"
 23#include "segment.h"
 24#include "gc.h"
 25
 26static LIST_HEAD(f2fs_stat_list);
 27static struct dentry *f2fs_debugfs_root;
 28static DEFINE_MUTEX(f2fs_stat_mutex);
 29
 30static void update_general_status(struct f2fs_sb_info *sbi)
 31{
 32	struct f2fs_stat_info *si = F2FS_STAT(sbi);
 33	int i;
 34
 35	/* validation check of the segment numbers */
 36	si->hit_largest = atomic64_read(&sbi->read_hit_largest);
 37	si->hit_cached = atomic64_read(&sbi->read_hit_cached);
 38	si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree);
 39	si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
 40	si->total_ext = atomic64_read(&sbi->total_hit_ext);
 41	si->ext_tree = atomic_read(&sbi->total_ext_tree);
 42	si->zombie_tree = atomic_read(&sbi->total_zombie_tree);
 43	si->ext_node = atomic_read(&sbi->total_ext_node);
 44	si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
 45	si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
 46	si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
 47	si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA);
 48	si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
 49	si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
 50	si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
 51	si->wb_pages = get_pages(sbi, F2FS_WRITEBACK);
 52	si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
 53	si->rsvd_segs = reserved_segments(sbi);
 54	si->overp_segs = overprovision_segments(sbi);
 55	si->valid_count = valid_user_blocks(sbi);
 56	si->valid_node_count = valid_node_count(sbi);
 57	si->valid_inode_count = valid_inode_count(sbi);
 58	si->inline_xattr = atomic_read(&sbi->inline_xattr);
 59	si->inline_inode = atomic_read(&sbi->inline_inode);
 60	si->inline_dir = atomic_read(&sbi->inline_dir);
 61	si->utilization = utilization(sbi);
 62
 63	si->free_segs = free_segments(sbi);
 64	si->free_secs = free_sections(sbi);
 65	si->prefree_count = prefree_segments(sbi);
 66	si->dirty_count = dirty_segments(sbi);
 67	si->node_pages = NODE_MAPPING(sbi)->nrpages;
 68	si->meta_pages = META_MAPPING(sbi)->nrpages;
 69	si->nats = NM_I(sbi)->nat_cnt;
 70	si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
 71	si->sits = MAIN_SEGS(sbi);
 72	si->dirty_sits = SIT_I(sbi)->dirty_sentries;
 73	si->fnids = NM_I(sbi)->fcnt;
 74	si->bg_gc = sbi->bg_gc;
 75	si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
 76		* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
 77		/ 2;
 78	si->util_valid = (int)(written_block_count(sbi) >>
 79						sbi->log_blocks_per_seg)
 80		* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
 81		/ 2;
 82	si->util_invalid = 50 - si->util_free - si->util_valid;
 83	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_NODE; i++) {
 84		struct curseg_info *curseg = CURSEG_I(sbi, i);
 85		si->curseg[i] = curseg->segno;
 86		si->cursec[i] = curseg->segno / sbi->segs_per_sec;
 87		si->curzone[i] = si->cursec[i] / sbi->secs_per_zone;
 88	}
 89
 90	for (i = 0; i < 2; i++) {
 91		si->segment_count[i] = sbi->segment_count[i];
 92		si->block_count[i] = sbi->block_count[i];
 93	}
 94
 95	si->inplace_count = atomic_read(&sbi->inplace_count);
 96}
 97
 98/*
 99 * This function calculates BDF of every segments
100 */
101static void update_sit_info(struct f2fs_sb_info *sbi)
102{
103	struct f2fs_stat_info *si = F2FS_STAT(sbi);
104	unsigned long long blks_per_sec, hblks_per_sec, total_vblocks;
105	unsigned long long bimodal, dist;
106	unsigned int segno, vblocks;
107	int ndirty = 0;
108
109	bimodal = 0;
110	total_vblocks = 0;
111	blks_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
112	hblks_per_sec = blks_per_sec / 2;
113	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
114		vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);
115		dist = abs(vblocks - hblks_per_sec);
116		bimodal += dist * dist;
117
118		if (vblocks > 0 && vblocks < blks_per_sec) {
119			total_vblocks += vblocks;
120			ndirty++;
121		}
122	}
123	dist = div_u64(MAIN_SECS(sbi) * hblks_per_sec * hblks_per_sec, 100);
124	si->bimodal = div64_u64(bimodal, dist);
125	if (si->dirty_count)
126		si->avg_vblocks = div_u64(total_vblocks, ndirty);
127	else
128		si->avg_vblocks = 0;
129}
130
131/*
132 * This function calculates memory footprint.
133 */
134static void update_mem_info(struct f2fs_sb_info *sbi)
135{
136	struct f2fs_stat_info *si = F2FS_STAT(sbi);
137	unsigned npages;
138	int i;
139
140	if (si->base_mem)
141		goto get_cache;
142
143	si->base_mem = sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize;
144	si->base_mem += 2 * sizeof(struct f2fs_inode_info);
145	si->base_mem += sizeof(*sbi->ckpt);
146
147	/* build sm */
148	si->base_mem += sizeof(struct f2fs_sm_info);
149
150	/* build sit */
151	si->base_mem += sizeof(struct sit_info);
152	si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
153	si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
154	si->base_mem += 3 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
155	si->base_mem += SIT_VBLOCK_MAP_SIZE;
156	if (sbi->segs_per_sec > 1)
157		si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
158	si->base_mem += __bitmap_size(sbi, SIT_BITMAP);
159
160	/* build free segmap */
161	si->base_mem += sizeof(struct free_segmap_info);
162	si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
163	si->base_mem += f2fs_bitmap_size(MAIN_SECS(sbi));
164
165	/* build curseg */
166	si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
167	si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE;
168
169	/* build dirty segmap */
170	si->base_mem += sizeof(struct dirty_seglist_info);
171	si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(MAIN_SEGS(sbi));
172	si->base_mem += f2fs_bitmap_size(MAIN_SECS(sbi));
173
174	/* build nm */
175	si->base_mem += sizeof(struct f2fs_nm_info);
176	si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
177
178get_cache:
179	si->cache_mem = 0;
180
181	/* build gc */
182	if (sbi->gc_thread)
183		si->cache_mem += sizeof(struct f2fs_gc_kthread);
184
185	/* build merge flush thread */
186	if (SM_I(sbi)->cmd_control_info)
187		si->cache_mem += sizeof(struct flush_cmd_control);
188
189	/* free nids */
190	si->cache_mem += NM_I(sbi)->fcnt * sizeof(struct free_nid);
191	si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
192	si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
193					sizeof(struct nat_entry_set);
194	si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
195	for (i = 0; i <= UPDATE_INO; i++)
196		si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
197	si->cache_mem += atomic_read(&sbi->total_ext_tree) *
198						sizeof(struct extent_tree);
199	si->cache_mem += atomic_read(&sbi->total_ext_node) *
200						sizeof(struct extent_node);
201
202	si->page_mem = 0;
203	npages = NODE_MAPPING(sbi)->nrpages;
204	si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
205	npages = META_MAPPING(sbi)->nrpages;
206	si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
207}
208
209static int stat_show(struct seq_file *s, void *v)
210{
211	struct f2fs_stat_info *si;
212	int i = 0;
213	int j;
214
215	mutex_lock(&f2fs_stat_mutex);
216	list_for_each_entry(si, &f2fs_stat_list, stat_list) {
217		update_general_status(si->sbi);
218
219		seq_printf(s, "\n=====[ partition info(%pg). #%d ]=====\n",
220			si->sbi->sb->s_bdev, i++);
221		seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
222			   si->sit_area_segs, si->nat_area_segs);
223		seq_printf(s, "[SSA: %d] [MAIN: %d",
224			   si->ssa_area_segs, si->main_area_segs);
225		seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
226			   si->overp_segs, si->rsvd_segs);
227		seq_printf(s, "Utilization: %d%% (%d valid blocks)\n",
228			   si->utilization, si->valid_count);
229		seq_printf(s, "  - Node: %u (Inode: %u, ",
230			   si->valid_node_count, si->valid_inode_count);
231		seq_printf(s, "Other: %u)\n  - Data: %u\n",
232			   si->valid_node_count - si->valid_inode_count,
233			   si->valid_count - si->valid_node_count);
234		seq_printf(s, "  - Inline_xattr Inode: %u\n",
235			   si->inline_xattr);
236		seq_printf(s, "  - Inline_data Inode: %u\n",
237			   si->inline_inode);
238		seq_printf(s, "  - Inline_dentry Inode: %u\n",
239			   si->inline_dir);
240		seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n",
241			   si->main_area_segs, si->main_area_sections,
242			   si->main_area_zones);
243		seq_printf(s, "  - COLD  data: %d, %d, %d\n",
244			   si->curseg[CURSEG_COLD_DATA],
245			   si->cursec[CURSEG_COLD_DATA],
246			   si->curzone[CURSEG_COLD_DATA]);
247		seq_printf(s, "  - WARM  data: %d, %d, %d\n",
248			   si->curseg[CURSEG_WARM_DATA],
249			   si->cursec[CURSEG_WARM_DATA],
250			   si->curzone[CURSEG_WARM_DATA]);
251		seq_printf(s, "  - HOT   data: %d, %d, %d\n",
252			   si->curseg[CURSEG_HOT_DATA],
253			   si->cursec[CURSEG_HOT_DATA],
254			   si->curzone[CURSEG_HOT_DATA]);
255		seq_printf(s, "  - Dir   dnode: %d, %d, %d\n",
256			   si->curseg[CURSEG_HOT_NODE],
257			   si->cursec[CURSEG_HOT_NODE],
258			   si->curzone[CURSEG_HOT_NODE]);
259		seq_printf(s, "  - File   dnode: %d, %d, %d\n",
260			   si->curseg[CURSEG_WARM_NODE],
261			   si->cursec[CURSEG_WARM_NODE],
262			   si->curzone[CURSEG_WARM_NODE]);
263		seq_printf(s, "  - Indir nodes: %d, %d, %d\n",
264			   si->curseg[CURSEG_COLD_NODE],
265			   si->cursec[CURSEG_COLD_NODE],
266			   si->curzone[CURSEG_COLD_NODE]);
267		seq_printf(s, "\n  - Valid: %d\n  - Dirty: %d\n",
268			   si->main_area_segs - si->dirty_count -
269			   si->prefree_count - si->free_segs,
270			   si->dirty_count);
271		seq_printf(s, "  - Prefree: %d\n  - Free: %d (%d)\n\n",
272			   si->prefree_count, si->free_segs, si->free_secs);
273		seq_printf(s, "CP calls: %d (BG: %d)\n",
274				si->cp_count, si->bg_cp_count);
275		seq_printf(s, "GC calls: %d (BG: %d)\n",
276			   si->call_count, si->bg_gc);
277		seq_printf(s, "  - data segments : %d (%d)\n",
278				si->data_segs, si->bg_data_segs);
279		seq_printf(s, "  - node segments : %d (%d)\n",
280				si->node_segs, si->bg_node_segs);
281		seq_printf(s, "Try to move %d blocks (BG: %d)\n", si->tot_blks,
282				si->bg_data_blks + si->bg_node_blks);
283		seq_printf(s, "  - data blocks : %d (%d)\n", si->data_blks,
284				si->bg_data_blks);
285		seq_printf(s, "  - node blocks : %d (%d)\n", si->node_blks,
286				si->bg_node_blks);
287		seq_puts(s, "\nExtent Cache:\n");
288		seq_printf(s, "  - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n",
289				si->hit_largest, si->hit_cached,
290				si->hit_rbtree);
291		seq_printf(s, "  - Hit Ratio: %llu%% (%llu / %llu)\n",
292				!si->total_ext ? 0 :
293				div64_u64(si->hit_total * 100, si->total_ext),
294				si->hit_total, si->total_ext);
295		seq_printf(s, "  - Inner Struct Count: tree: %d(%d), node: %d\n",
296				si->ext_tree, si->zombie_tree, si->ext_node);
297		seq_puts(s, "\nBalancing F2FS Async:\n");
298		seq_printf(s, "  - inmem: %4d, wb: %4d\n",
299			   si->inmem_pages, si->wb_pages);
300		seq_printf(s, "  - nodes: %4d in %4d\n",
301			   si->ndirty_node, si->node_pages);
302		seq_printf(s, "  - dents: %4d in dirs:%4d\n",
303			   si->ndirty_dent, si->ndirty_dirs);
304		seq_printf(s, "  - datas: %4d in files:%4d\n",
305			   si->ndirty_data, si->ndirty_files);
306		seq_printf(s, "  - meta: %4d in %4d\n",
307			   si->ndirty_meta, si->meta_pages);
308		seq_printf(s, "  - NATs: %9d/%9d\n  - SITs: %9d/%9d\n",
309			   si->dirty_nats, si->nats, si->dirty_sits, si->sits);
310		seq_printf(s, "  - free_nids: %9d\n",
311			   si->fnids);
312		seq_puts(s, "\nDistribution of User Blocks:");
313		seq_puts(s, " [ valid | invalid | free ]\n");
314		seq_puts(s, "  [");
315
316		for (j = 0; j < si->util_valid; j++)
317			seq_putc(s, '-');
318		seq_putc(s, '|');
319
320		for (j = 0; j < si->util_invalid; j++)
321			seq_putc(s, '-');
322		seq_putc(s, '|');
323
324		for (j = 0; j < si->util_free; j++)
325			seq_putc(s, '-');
326		seq_puts(s, "]\n\n");
327		seq_printf(s, "IPU: %u blocks\n", si->inplace_count);
328		seq_printf(s, "SSR: %u blocks in %u segments\n",
329			   si->block_count[SSR], si->segment_count[SSR]);
330		seq_printf(s, "LFS: %u blocks in %u segments\n",
331			   si->block_count[LFS], si->segment_count[LFS]);
332
333		/* segment usage info */
334		update_sit_info(si->sbi);
335		seq_printf(s, "\nBDF: %u, avg. vblocks: %u\n",
336			   si->bimodal, si->avg_vblocks);
337
338		/* memory footprint */
339		update_mem_info(si->sbi);
340		seq_printf(s, "\nMemory: %llu KB\n",
341			(si->base_mem + si->cache_mem + si->page_mem) >> 10);
342		seq_printf(s, "  - static: %llu KB\n",
343				si->base_mem >> 10);
344		seq_printf(s, "  - cached: %llu KB\n",
345				si->cache_mem >> 10);
346		seq_printf(s, "  - paged : %llu KB\n",
347				si->page_mem >> 10);
348	}
349	mutex_unlock(&f2fs_stat_mutex);
350	return 0;
351}
352
353static int stat_open(struct inode *inode, struct file *file)
354{
355	return single_open(file, stat_show, inode->i_private);
356}
357
358static const struct file_operations stat_fops = {
359	.open = stat_open,
360	.read = seq_read,
361	.llseek = seq_lseek,
362	.release = single_release,
363};
364
365int f2fs_build_stats(struct f2fs_sb_info *sbi)
366{
367	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
368	struct f2fs_stat_info *si;
369
370	si = kzalloc(sizeof(struct f2fs_stat_info), GFP_KERNEL);
371	if (!si)
372		return -ENOMEM;
373
374	si->all_area_segs = le32_to_cpu(raw_super->segment_count);
375	si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit);
376	si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat);
377	si->ssa_area_segs = le32_to_cpu(raw_super->segment_count_ssa);
378	si->main_area_segs = le32_to_cpu(raw_super->segment_count_main);
379	si->main_area_sections = le32_to_cpu(raw_super->section_count);
380	si->main_area_zones = si->main_area_sections /
381				le32_to_cpu(raw_super->secs_per_zone);
382	si->sbi = sbi;
383	sbi->stat_info = si;
384
385	atomic64_set(&sbi->total_hit_ext, 0);
386	atomic64_set(&sbi->read_hit_rbtree, 0);
387	atomic64_set(&sbi->read_hit_largest, 0);
388	atomic64_set(&sbi->read_hit_cached, 0);
389
390	atomic_set(&sbi->inline_xattr, 0);
391	atomic_set(&sbi->inline_inode, 0);
392	atomic_set(&sbi->inline_dir, 0);
393	atomic_set(&sbi->inplace_count, 0);
394
395	mutex_lock(&f2fs_stat_mutex);
396	list_add_tail(&si->stat_list, &f2fs_stat_list);
397	mutex_unlock(&f2fs_stat_mutex);
398
399	return 0;
400}
401
402void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
403{
404	struct f2fs_stat_info *si = F2FS_STAT(sbi);
405
406	mutex_lock(&f2fs_stat_mutex);
407	list_del(&si->stat_list);
408	mutex_unlock(&f2fs_stat_mutex);
409
410	kfree(si);
411}
412
413int __init f2fs_create_root_stats(void)
414{
415	struct dentry *file;
416
417	f2fs_debugfs_root = debugfs_create_dir("f2fs", NULL);
418	if (!f2fs_debugfs_root)
419		return -ENOMEM;
420
421	file = debugfs_create_file("status", S_IRUGO, f2fs_debugfs_root,
422			NULL, &stat_fops);
423	if (!file) {
424		debugfs_remove(f2fs_debugfs_root);
425		f2fs_debugfs_root = NULL;
426		return -ENOMEM;
427	}
428
429	return 0;
430}
431
432void f2fs_destroy_root_stats(void)
433{
434	if (!f2fs_debugfs_root)
435		return;
436
437	debugfs_remove_recursive(f2fs_debugfs_root);
438	f2fs_debugfs_root = NULL;
439}
fs/f2fs/debug.c (v5.4)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * f2fs debugging statistics
  4 *
  5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  6 *             http://www.samsung.com/
  7 * Copyright (c) 2012 Linux Foundation
  8 * Copyright (c) 2012 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  9 */
 10
 11#include <linux/fs.h>
 12#include <linux/backing-dev.h>
 13#include <linux/f2fs_fs.h>
 14#include <linux/blkdev.h>
 15#include <linux/debugfs.h>
 16#include <linux/seq_file.h>
 17
 18#include "f2fs.h"
 19#include "node.h"
 20#include "segment.h"
 21#include "gc.h"
 22
 23static LIST_HEAD(f2fs_stat_list);
 24static struct dentry *f2fs_debugfs_root;
 25static DEFINE_MUTEX(f2fs_stat_mutex);
 26
 27static void update_general_status(struct f2fs_sb_info *sbi)
 28{
 29	struct f2fs_stat_info *si = F2FS_STAT(sbi);
 30	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
 31	int i;
 32
 33	/* these will be changed if online resize is done */
 34	si->main_area_segs = le32_to_cpu(raw_super->segment_count_main);
 35	si->main_area_sections = le32_to_cpu(raw_super->section_count);
 36	si->main_area_zones = si->main_area_sections /
 37				le32_to_cpu(raw_super->secs_per_zone);
 38
 39	/* validation check of the segment numbers */
 40	si->hit_largest = atomic64_read(&sbi->read_hit_largest);
 41	si->hit_cached = atomic64_read(&sbi->read_hit_cached);
 42	si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree);
 43	si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
 44	si->total_ext = atomic64_read(&sbi->total_hit_ext);
 45	si->ext_tree = atomic_read(&sbi->total_ext_tree);
 46	si->zombie_tree = atomic_read(&sbi->total_zombie_tree);
 47	si->ext_node = atomic_read(&sbi->total_ext_node);
 48	si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
 49	si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
 50	si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
 51	si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA);
 52	si->ndirty_qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
 53	si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
 54	si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
 55	si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
 56	si->nquota_files = sbi->nquota_files;
 57	si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
 58	si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
 59	si->aw_cnt = atomic_read(&sbi->aw_cnt);
 60	si->vw_cnt = atomic_read(&sbi->vw_cnt);
 61	si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt);
 62	si->max_vw_cnt = atomic_read(&sbi->max_vw_cnt);
 63	si->nr_dio_read = get_pages(sbi, F2FS_DIO_READ);
 64	si->nr_dio_write = get_pages(sbi, F2FS_DIO_WRITE);
 65	si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
 66	si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA);
 67	si->nr_rd_data = get_pages(sbi, F2FS_RD_DATA);
 68	si->nr_rd_node = get_pages(sbi, F2FS_RD_NODE);
 69	si->nr_rd_meta = get_pages(sbi, F2FS_RD_META);
 70	if (SM_I(sbi)->fcc_info) {
 71		si->nr_flushed =
 72			atomic_read(&SM_I(sbi)->fcc_info->issued_flush);
 73		si->nr_flushing =
 74			atomic_read(&SM_I(sbi)->fcc_info->queued_flush);
 75		si->flush_list_empty =
 76			llist_empty(&SM_I(sbi)->fcc_info->issue_list);
 77	}
 78	if (SM_I(sbi)->dcc_info) {
 79		si->nr_discarded =
 80			atomic_read(&SM_I(sbi)->dcc_info->issued_discard);
 81		si->nr_discarding =
 82			atomic_read(&SM_I(sbi)->dcc_info->queued_discard);
 83		si->nr_discard_cmd =
 84			atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
 85		si->undiscard_blks = SM_I(sbi)->dcc_info->undiscard_blks;
 86	}
 87	si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
 88	si->rsvd_segs = reserved_segments(sbi);
 89	si->overp_segs = overprovision_segments(sbi);
 90	si->valid_count = valid_user_blocks(sbi);
 91	si->discard_blks = discard_blocks(sbi);
 92	si->valid_node_count = valid_node_count(sbi);
 93	si->valid_inode_count = valid_inode_count(sbi);
 94	si->inline_xattr = atomic_read(&sbi->inline_xattr);
 95	si->inline_inode = atomic_read(&sbi->inline_inode);
 96	si->inline_dir = atomic_read(&sbi->inline_dir);
 97	si->append = sbi->im[APPEND_INO].ino_num;
 98	si->update = sbi->im[UPDATE_INO].ino_num;
 99	si->orphans = sbi->im[ORPHAN_INO].ino_num;
100	si->utilization = utilization(sbi);
101
102	si->free_segs = free_segments(sbi);
103	si->free_secs = free_sections(sbi);
104	si->prefree_count = prefree_segments(sbi);
105	si->dirty_count = dirty_segments(sbi);
106	if (sbi->node_inode)
107		si->node_pages = NODE_MAPPING(sbi)->nrpages;
108	if (sbi->meta_inode)
109		si->meta_pages = META_MAPPING(sbi)->nrpages;
110	si->nats = NM_I(sbi)->nat_cnt;
111	si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
112	si->sits = MAIN_SEGS(sbi);
113	si->dirty_sits = SIT_I(sbi)->dirty_sentries;
114	si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID];
115	si->avail_nids = NM_I(sbi)->available_nids;
116	si->alloc_nids = NM_I(sbi)->nid_cnt[PREALLOC_NID];
117	si->bg_gc = sbi->bg_gc;
118	si->io_skip_bggc = sbi->io_skip_bggc;
119	si->other_skip_bggc = sbi->other_skip_bggc;
120	si->skipped_atomic_files[BG_GC] = sbi->skipped_atomic_files[BG_GC];
121	si->skipped_atomic_files[FG_GC] = sbi->skipped_atomic_files[FG_GC];
122	si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
123		* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
124		/ 2;
125	si->util_valid = (int)(written_block_count(sbi) >>
126						sbi->log_blocks_per_seg)
127		* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
128		/ 2;
129	si->util_invalid = 50 - si->util_free - si->util_valid;
130	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_NODE; i++) {
131		struct curseg_info *curseg = CURSEG_I(sbi, i);
132		si->curseg[i] = curseg->segno;
133		si->cursec[i] = GET_SEC_FROM_SEG(sbi, curseg->segno);
134		si->curzone[i] = GET_ZONE_FROM_SEC(sbi, si->cursec[i]);
135	}
136
137	for (i = META_CP; i < META_MAX; i++)
138		si->meta_count[i] = atomic_read(&sbi->meta_count[i]);
139
140	for (i = 0; i < 2; i++) {
141		si->segment_count[i] = sbi->segment_count[i];
142		si->block_count[i] = sbi->block_count[i];
143	}
144
145	si->inplace_count = atomic_read(&sbi->inplace_count);
146}
147
148/*
149 * This function calculates BDF of every segments
150 */
151static void update_sit_info(struct f2fs_sb_info *sbi)
152{
153	struct f2fs_stat_info *si = F2FS_STAT(sbi);
154	unsigned long long blks_per_sec, hblks_per_sec, total_vblocks;
155	unsigned long long bimodal, dist;
156	unsigned int segno, vblocks;
157	int ndirty = 0;
158
159	bimodal = 0;
160	total_vblocks = 0;
161	blks_per_sec = BLKS_PER_SEC(sbi);
162	hblks_per_sec = blks_per_sec / 2;
163	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
164		vblocks = get_valid_blocks(sbi, segno, true);
165		dist = abs(vblocks - hblks_per_sec);
166		bimodal += dist * dist;
167
168		if (vblocks > 0 && vblocks < blks_per_sec) {
169			total_vblocks += vblocks;
170			ndirty++;
171		}
172	}
173	dist = div_u64(MAIN_SECS(sbi) * hblks_per_sec * hblks_per_sec, 100);
174	si->bimodal = div64_u64(bimodal, dist);
175	if (si->dirty_count)
176		si->avg_vblocks = div_u64(total_vblocks, ndirty);
177	else
178		si->avg_vblocks = 0;
179}
180
181/*
182 * This function calculates memory footprint.
183 */
184static void update_mem_info(struct f2fs_sb_info *sbi)
185{
186	struct f2fs_stat_info *si = F2FS_STAT(sbi);
187	int i;
188
189	if (si->base_mem)
190		goto get_cache;
191
192	/* build stat */
193	si->base_mem = sizeof(struct f2fs_stat_info);
194
195	/* build superblock */
196	si->base_mem += sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize;
197	si->base_mem += 2 * sizeof(struct f2fs_inode_info);
198	si->base_mem += sizeof(*sbi->ckpt);
199
200	/* build sm */
201	si->base_mem += sizeof(struct f2fs_sm_info);
202
203	/* build sit */
204	si->base_mem += sizeof(struct sit_info);
205	si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
206	si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
207	si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
208	si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
209	si->base_mem += SIT_VBLOCK_MAP_SIZE;
210	if (__is_large_section(sbi))
211		si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
212	si->base_mem += __bitmap_size(sbi, SIT_BITMAP);
213
214	/* build free segmap */
215	si->base_mem += sizeof(struct free_segmap_info);
216	si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
217	si->base_mem += f2fs_bitmap_size(MAIN_SECS(sbi));
218
219	/* build curseg */
220	si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
221	si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE;
222
223	/* build dirty segmap */
224	si->base_mem += sizeof(struct dirty_seglist_info);
225	si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(MAIN_SEGS(sbi));
226	si->base_mem += f2fs_bitmap_size(MAIN_SECS(sbi));
227
228	/* build nm */
229	si->base_mem += sizeof(struct f2fs_nm_info);
230	si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
231	si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
232	si->base_mem += NM_I(sbi)->nat_blocks *
233				f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK);
234	si->base_mem += NM_I(sbi)->nat_blocks / 8;
235	si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short);
236
237get_cache:
238	si->cache_mem = 0;
239
240	/* build gc */
241	if (sbi->gc_thread)
242		si->cache_mem += sizeof(struct f2fs_gc_kthread);
243
244	/* build merge flush thread */
245	if (SM_I(sbi)->fcc_info)
246		si->cache_mem += sizeof(struct flush_cmd_control);
247	if (SM_I(sbi)->dcc_info) {
248		si->cache_mem += sizeof(struct discard_cmd_control);
249		si->cache_mem += sizeof(struct discard_cmd) *
250			atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
251	}
252
253	/* free nids */
254	si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID] +
255				NM_I(sbi)->nid_cnt[PREALLOC_NID]) *
256				sizeof(struct free_nid);
257	si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
258	si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
259					sizeof(struct nat_entry_set);
260	si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
261	for (i = 0; i < MAX_INO_ENTRY; i++)
262		si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
263	si->cache_mem += atomic_read(&sbi->total_ext_tree) *
264						sizeof(struct extent_tree);
265	si->cache_mem += atomic_read(&sbi->total_ext_node) *
266						sizeof(struct extent_node);
267
268	si->page_mem = 0;
269	if (sbi->node_inode) {
270		unsigned npages = NODE_MAPPING(sbi)->nrpages;
271		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
272	}
273	if (sbi->meta_inode) {
274		unsigned npages = META_MAPPING(sbi)->nrpages;
275		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
276	}
277}
278
279static int stat_show(struct seq_file *s, void *v)
280{
281	struct f2fs_stat_info *si;
282	int i = 0;
283	int j;
284
285	mutex_lock(&f2fs_stat_mutex);
286	list_for_each_entry(si, &f2fs_stat_list, stat_list) {
287		update_general_status(si->sbi);
288
289		seq_printf(s, "\n=====[ partition info(%pg). #%d, %s, CP: %s]=====\n",
290			si->sbi->sb->s_bdev, i++,
291			f2fs_readonly(si->sbi->sb) ? "RO": "RW",
292			is_set_ckpt_flags(si->sbi, CP_DISABLED_FLAG) ?
293			"Disabled": (f2fs_cp_error(si->sbi) ? "Error": "Good"));
294		seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
295			   si->sit_area_segs, si->nat_area_segs);
296		seq_printf(s, "[SSA: %d] [MAIN: %d",
297			   si->ssa_area_segs, si->main_area_segs);
298		seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
299			   si->overp_segs, si->rsvd_segs);
300		if (test_opt(si->sbi, DISCARD))
301			seq_printf(s, "Utilization: %u%% (%u valid blocks, %u discard blocks)\n",
302				si->utilization, si->valid_count, si->discard_blks);
303		else
304			seq_printf(s, "Utilization: %u%% (%u valid blocks)\n",
305				si->utilization, si->valid_count);
306
307		seq_printf(s, "  - Node: %u (Inode: %u, ",
308			   si->valid_node_count, si->valid_inode_count);
309		seq_printf(s, "Other: %u)\n  - Data: %u\n",
310			   si->valid_node_count - si->valid_inode_count,
311			   si->valid_count - si->valid_node_count);
312		seq_printf(s, "  - Inline_xattr Inode: %u\n",
313			   si->inline_xattr);
314		seq_printf(s, "  - Inline_data Inode: %u\n",
315			   si->inline_inode);
316		seq_printf(s, "  - Inline_dentry Inode: %u\n",
317			   si->inline_dir);
318		seq_printf(s, "  - Orphan/Append/Update Inode: %u, %u, %u\n",
319			   si->orphans, si->append, si->update);
320		seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n",
321			   si->main_area_segs, si->main_area_sections,
322			   si->main_area_zones);
323		seq_printf(s, "  - COLD  data: %d, %d, %d\n",
324			   si->curseg[CURSEG_COLD_DATA],
325			   si->cursec[CURSEG_COLD_DATA],
326			   si->curzone[CURSEG_COLD_DATA]);
327		seq_printf(s, "  - WARM  data: %d, %d, %d\n",
328			   si->curseg[CURSEG_WARM_DATA],
329			   si->cursec[CURSEG_WARM_DATA],
330			   si->curzone[CURSEG_WARM_DATA]);
331		seq_printf(s, "  - HOT   data: %d, %d, %d\n",
332			   si->curseg[CURSEG_HOT_DATA],
333			   si->cursec[CURSEG_HOT_DATA],
334			   si->curzone[CURSEG_HOT_DATA]);
335		seq_printf(s, "  - Dir   dnode: %d, %d, %d\n",
336			   si->curseg[CURSEG_HOT_NODE],
337			   si->cursec[CURSEG_HOT_NODE],
338			   si->curzone[CURSEG_HOT_NODE]);
339		seq_printf(s, "  - File   dnode: %d, %d, %d\n",
340			   si->curseg[CURSEG_WARM_NODE],
341			   si->cursec[CURSEG_WARM_NODE],
342			   si->curzone[CURSEG_WARM_NODE]);
343		seq_printf(s, "  - Indir nodes: %d, %d, %d\n",
344			   si->curseg[CURSEG_COLD_NODE],
345			   si->cursec[CURSEG_COLD_NODE],
346			   si->curzone[CURSEG_COLD_NODE]);
347		seq_printf(s, "\n  - Valid: %d\n  - Dirty: %d\n",
348			   si->main_area_segs - si->dirty_count -
349			   si->prefree_count - si->free_segs,
350			   si->dirty_count);
351		seq_printf(s, "  - Prefree: %d\n  - Free: %d (%d)\n\n",
352			   si->prefree_count, si->free_segs, si->free_secs);
353		seq_printf(s, "CP calls: %d (BG: %d)\n",
354				si->cp_count, si->bg_cp_count);
355		seq_printf(s, "  - cp blocks : %u\n", si->meta_count[META_CP]);
356		seq_printf(s, "  - sit blocks : %u\n",
357				si->meta_count[META_SIT]);
358		seq_printf(s, "  - nat blocks : %u\n",
359				si->meta_count[META_NAT]);
360		seq_printf(s, "  - ssa blocks : %u\n",
361				si->meta_count[META_SSA]);
362		seq_printf(s, "GC calls: %d (BG: %d)\n",
363			   si->call_count, si->bg_gc);
364		seq_printf(s, "  - data segments : %d (%d)\n",
365				si->data_segs, si->bg_data_segs);
366		seq_printf(s, "  - node segments : %d (%d)\n",
367				si->node_segs, si->bg_node_segs);
368		seq_printf(s, "Try to move %d blocks (BG: %d)\n", si->tot_blks,
369				si->bg_data_blks + si->bg_node_blks);
370		seq_printf(s, "  - data blocks : %d (%d)\n", si->data_blks,
371				si->bg_data_blks);
372		seq_printf(s, "  - node blocks : %d (%d)\n", si->node_blks,
373				si->bg_node_blks);
374		seq_printf(s, "Skipped : atomic write %llu (%llu)\n",
375				si->skipped_atomic_files[BG_GC] +
376				si->skipped_atomic_files[FG_GC],
377				si->skipped_atomic_files[BG_GC]);
378		seq_printf(s, "BG skip : IO: %u, Other: %u\n",
379				si->io_skip_bggc, si->other_skip_bggc);
380		seq_puts(s, "\nExtent Cache:\n");
381		seq_printf(s, "  - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n",
382				si->hit_largest, si->hit_cached,
383				si->hit_rbtree);
384		seq_printf(s, "  - Hit Ratio: %llu%% (%llu / %llu)\n",
385				!si->total_ext ? 0 :
386				div64_u64(si->hit_total * 100, si->total_ext),
387				si->hit_total, si->total_ext);
388		seq_printf(s, "  - Inner Struct Count: tree: %d(%d), node: %d\n",
389				si->ext_tree, si->zombie_tree, si->ext_node);
390		seq_puts(s, "\nBalancing F2FS Async:\n");
391		seq_printf(s, "  - DIO (R: %4d, W: %4d)\n",
392			   si->nr_dio_read, si->nr_dio_write);
393		seq_printf(s, "  - IO_R (Data: %4d, Node: %4d, Meta: %4d\n",
394			   si->nr_rd_data, si->nr_rd_node, si->nr_rd_meta);
395		seq_printf(s, "  - IO_W (CP: %4d, Data: %4d, Flush: (%4d %4d %4d), "
396			"Discard: (%4d %4d)) cmd: %4d undiscard:%4u\n",
397			   si->nr_wb_cp_data, si->nr_wb_data,
398			   si->nr_flushing, si->nr_flushed,
399			   si->flush_list_empty,
400			   si->nr_discarding, si->nr_discarded,
401			   si->nr_discard_cmd, si->undiscard_blks);
402		seq_printf(s, "  - inmem: %4d, atomic IO: %4d (Max. %4d), "
403			"volatile IO: %4d (Max. %4d)\n",
404			   si->inmem_pages, si->aw_cnt, si->max_aw_cnt,
405			   si->vw_cnt, si->max_vw_cnt);
406		seq_printf(s, "  - nodes: %4d in %4d\n",
407			   si->ndirty_node, si->node_pages);
408		seq_printf(s, "  - dents: %4d in dirs:%4d (%4d)\n",
409			   si->ndirty_dent, si->ndirty_dirs, si->ndirty_all);
410		seq_printf(s, "  - datas: %4d in files:%4d\n",
411			   si->ndirty_data, si->ndirty_files);
412		seq_printf(s, "  - quota datas: %4d in quota files:%4d\n",
413			   si->ndirty_qdata, si->nquota_files);
414		seq_printf(s, "  - meta: %4d in %4d\n",
415			   si->ndirty_meta, si->meta_pages);
416		seq_printf(s, "  - imeta: %4d\n",
417			   si->ndirty_imeta);
418		seq_printf(s, "  - NATs: %9d/%9d\n  - SITs: %9d/%9d\n",
419			   si->dirty_nats, si->nats, si->dirty_sits, si->sits);
420		seq_printf(s, "  - free_nids: %9d/%9d\n  - alloc_nids: %9d\n",
421			   si->free_nids, si->avail_nids, si->alloc_nids);
422		seq_puts(s, "\nDistribution of User Blocks:");
423		seq_puts(s, " [ valid | invalid | free ]\n");
424		seq_puts(s, "  [");
425
426		for (j = 0; j < si->util_valid; j++)
427			seq_putc(s, '-');
428		seq_putc(s, '|');
429
430		for (j = 0; j < si->util_invalid; j++)
431			seq_putc(s, '-');
432		seq_putc(s, '|');
433
434		for (j = 0; j < si->util_free; j++)
435			seq_putc(s, '-');
436		seq_puts(s, "]\n\n");
437		seq_printf(s, "IPU: %u blocks\n", si->inplace_count);
438		seq_printf(s, "SSR: %u blocks in %u segments\n",
439			   si->block_count[SSR], si->segment_count[SSR]);
440		seq_printf(s, "LFS: %u blocks in %u segments\n",
441			   si->block_count[LFS], si->segment_count[LFS]);
442
443		/* segment usage info */
444		update_sit_info(si->sbi);
445		seq_printf(s, "\nBDF: %u, avg. vblocks: %u\n",
446			   si->bimodal, si->avg_vblocks);
447
448		/* memory footprint */
449		update_mem_info(si->sbi);
450		seq_printf(s, "\nMemory: %llu KB\n",
451			(si->base_mem + si->cache_mem + si->page_mem) >> 10);
452		seq_printf(s, "  - static: %llu KB\n",
453				si->base_mem >> 10);
454		seq_printf(s, "  - cached: %llu KB\n",
455				si->cache_mem >> 10);
456		seq_printf(s, "  - paged : %llu KB\n",
457				si->page_mem >> 10);
458	}
459	mutex_unlock(&f2fs_stat_mutex);
460	return 0;
461}
462
463DEFINE_SHOW_ATTRIBUTE(stat);
464
465int f2fs_build_stats(struct f2fs_sb_info *sbi)
466{
467	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
468	struct f2fs_stat_info *si;
469	int i;
470
471	si = f2fs_kzalloc(sbi, sizeof(struct f2fs_stat_info), GFP_KERNEL);
472	if (!si)
473		return -ENOMEM;
474
475	si->all_area_segs = le32_to_cpu(raw_super->segment_count);
476	si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit);
477	si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat);
478	si->ssa_area_segs = le32_to_cpu(raw_super->segment_count_ssa);
479	si->main_area_segs = le32_to_cpu(raw_super->segment_count_main);
480	si->main_area_sections = le32_to_cpu(raw_super->section_count);
481	si->main_area_zones = si->main_area_sections /
482				le32_to_cpu(raw_super->secs_per_zone);
483	si->sbi = sbi;
484	sbi->stat_info = si;
485
486	atomic64_set(&sbi->total_hit_ext, 0);
487	atomic64_set(&sbi->read_hit_rbtree, 0);
488	atomic64_set(&sbi->read_hit_largest, 0);
489	atomic64_set(&sbi->read_hit_cached, 0);
490
491	atomic_set(&sbi->inline_xattr, 0);
492	atomic_set(&sbi->inline_inode, 0);
493	atomic_set(&sbi->inline_dir, 0);
494	atomic_set(&sbi->inplace_count, 0);
495	for (i = META_CP; i < META_MAX; i++)
496		atomic_set(&sbi->meta_count[i], 0);
497
498	atomic_set(&sbi->aw_cnt, 0);
499	atomic_set(&sbi->vw_cnt, 0);
500	atomic_set(&sbi->max_aw_cnt, 0);
501	atomic_set(&sbi->max_vw_cnt, 0);
502
503	mutex_lock(&f2fs_stat_mutex);
504	list_add_tail(&si->stat_list, &f2fs_stat_list);
505	mutex_unlock(&f2fs_stat_mutex);
506
507	return 0;
508}
509
510void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
511{
512	struct f2fs_stat_info *si = F2FS_STAT(sbi);
513
514	mutex_lock(&f2fs_stat_mutex);
515	list_del(&si->stat_list);
516	mutex_unlock(&f2fs_stat_mutex);
517
518	kvfree(si);
519}
520
521void __init f2fs_create_root_stats(void)
522{
523	f2fs_debugfs_root = debugfs_create_dir("f2fs", NULL);
524
525	debugfs_create_file("status", S_IRUGO, f2fs_debugfs_root, NULL,
526			    &stat_fops);
527}
528
529void f2fs_destroy_root_stats(void)
530{
531	debugfs_remove_recursive(f2fs_debugfs_root);
532	f2fs_debugfs_root = NULL;
533}