fs/f2fs/shrinker.c — two kernel versions of the f2fs shrinker captured from an
online source browser: v4.10.11 first, then v6.13.7 further below.
/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
 12#include <linux/fs.h>
 13#include <linux/f2fs_fs.h>
 14
 15#include "f2fs.h"
 16#include "node.h"
 17
/* All mounted f2fs instances registered with the shrinker. */
static LIST_HEAD(f2fs_list);
/* Protects f2fs_list; dropped while a single instance is being processed. */
static DEFINE_SPINLOCK(f2fs_list_lock);
/* Pass counter so f2fs_shrink_scan visits each instance once per invocation. */
static unsigned int shrinker_run_no;
 21
 22static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
 23{
 24	long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
 25
 26	return count > 0 ? count : 0;
 27}
 28
 29static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
 30{
 31	long count = NM_I(sbi)->nid_cnt[FREE_NID_LIST] - MAX_FREE_NIDS;
 32
 33	return count > 0 ? count : 0;
 34}
 35
 36static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
 
 37{
 38	return atomic_read(&sbi->total_zombie_tree) +
 39				atomic_read(&sbi->total_ext_node);
 
 
 40}
 41
/*
 * Shrinker ->count_objects callback: report how many cache objects (extent
 * cache entries, clean NAT entries, excess free nids) are reclaimable
 * across all registered f2fs instances.
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super; skip instances mid-unmount */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		/* drop the list lock while touching sbi; umount_mutex pins it */
		spin_unlock(&f2fs_list_lock);

		/* count extent cache entries */
		count += __count_extent_cache(sbi);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}
 77
/*
 * Shrinker ->scan_objects callback: try to free up to sc->nr_to_scan
 * objects.  Per instance, in order: extent cache (given at most half the
 * budget), then clean NAT entries, then free nids.
 *
 * Fairness: each invocation takes a fresh non-zero run_no; an instance
 * already stamped with it was visited this pass, and visited instances are
 * rotated to the list tail so the next pass starts with a different one.
 */
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
	/* 0 means "never visited", so skip it when the counter wraps */
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* already visited in this pass: the whole list is done */
		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super; skip instances mid-unmount */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		/* drop the list lock while shrinking; umount_mutex pins sbi */
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink extent cache entries (at most half the budget) */
		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		/* rotate to the tail so the next pass starts elsewhere */
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}
128
/*
 * Register @sbi with the global shrinker list so f2fs_shrink_count/_scan
 * will consider its caches.
 */
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}
135
136void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
137{
138	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
 
 
139
140	spin_lock(&f2fs_list_lock);
141	list_del(&sbi->s_list);
142	spin_unlock(&f2fs_list_lock);
143}
===== end of the v4.10.11 copy; the v6.13.7 version of the same file follows =====
// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 */
  9#include <linux/fs.h>
 10#include <linux/f2fs_fs.h>
 11
 12#include "f2fs.h"
 13#include "node.h"
 14
/* All mounted f2fs instances registered with the shrinker. */
static LIST_HEAD(f2fs_list);
/* Protects f2fs_list; dropped while a single instance is being processed. */
static DEFINE_SPINLOCK(f2fs_list_lock);
/* Pass counter so f2fs_shrink_scan visits each instance once per invocation. */
static unsigned int shrinker_run_no;
 18
 19static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
 20{
 21	return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT];
 
 
 22}
 23
 24static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
 25{
 26	long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS;
 27
 28	return count > 0 ? count : 0;
 29}
 30
 31static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi,
 32					enum extent_type type)
 33{
 34	struct extent_tree_info *eti = &sbi->extent_tree[type];
 35
 36	return atomic_read(&eti->total_zombie_tree) +
 37				atomic_read(&eti->total_ext_node);
 38}
 39
/*
 * Shrinker ->count_objects callback: report how many cache objects (read
 * and block-age extent cache entries, clean NAT entries, excess free nids)
 * are reclaimable across all registered f2fs instances.
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super; skip instances mid-unmount */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		/* drop the list lock while touching sbi; umount_mutex pins it */
		spin_unlock(&f2fs_list_lock);

		/* count read extent cache entries */
		count += __count_extent_cache(sbi, EX_READ);

		/* count block age extent cache entries */
		count += __count_extent_cache(sbi, EX_BLOCK_AGE);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}
 78
/*
 * Shrinker ->scan_objects callback: try to free up to sc->nr_to_scan
 * objects.  Per instance, in order: block-age extent cache (quarter of the
 * budget), read extent cache (another quarter), then clean NAT entries,
 * then free nids.
 *
 * Fairness: each invocation takes a fresh non-zero run_no; an instance
 * already stamped with it was visited this pass, and visited instances are
 * rotated to the list tail so the next pass starts with a different one.
 */
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
	/* 0 means "never visited", so skip it when the counter wraps */
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* already visited in this pass: the whole list is done */
		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super; skip instances mid-unmount */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		/* drop the list lock while shrinking; umount_mutex pins sbi */
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink block age extent cache entries */
		freed += f2fs_shrink_age_extent_tree(sbi, nr >> 2);

		/* shrink read extent cache entries */
		freed += f2fs_shrink_read_extent_tree(sbi, nr >> 2);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		/* rotate to the tail so the next pass starts elsewhere */
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}
132
/*
 * Register @sbi with the global shrinker list so f2fs_shrink_count/_scan
 * will consider its caches.
 */
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}
139
/*
 * Unregister @sbi from the shrinker list, first releasing every read and
 * block-age extent cache object it still owns.  Presumably called from the
 * unmount path while holding sbi->umount_mutex (which the shrinker
 * callbacks take with mutex_trylock) — TODO confirm against the caller.
 */
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	f2fs_shrink_read_extent_tree(sbi, __count_extent_cache(sbi, EX_READ));
	f2fs_shrink_age_extent_tree(sbi,
				__count_extent_cache(sbi, EX_BLOCK_AGE));

	spin_lock(&f2fs_list_lock);
	/* list_del_init keeps s_list self-linked in case of stale access */
	list_del_init(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}