v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * DAMON Primitives for The Physical Address Space
  4 *
  5 * Author: SeongJae Park <sj@kernel.org>
  6 */
  7
  8#define pr_fmt(fmt) "damon-pa: " fmt
  9
 10#include <linux/mmu_notifier.h>
 11#include <linux/page_idle.h>
 12#include <linux/pagemap.h>
 13#include <linux/rmap.h>
 14#include <linux/swap.h>
 15
 16#include "../internal.h"
 17#include "ops-common.h"
 18
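/*
 * rmap_one() callback for damon_pa_mkold(): clear the accessed bit of every
 * PTE or PMD that maps the folio within the given VMA, so that a later check
 * can tell whether the mapping has been used again.
 */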
 19static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
 20		unsigned long addr, void *arg)
 21{
 22	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
 23
 24	while (page_vma_mapped_walk(&pvmw)) {
 25		addr = pvmw.address;
 26		if (pvmw.pte)
 27			damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
 28		else
 29			damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
 30	}
 31	return true;
 32}
 33
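/*
 * Make the page at @paddr 'old': clear its accessed bits via an rmap walk, or
 * set the page-idle flag if the folio is not mapped.  Non-anonymous and KSM
 * folios need the folio lock for rmap_walk(); if it cannot be taken with a
 * trylock, the check is silently skipped.
 */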
 34static void damon_pa_mkold(unsigned long paddr)
 35{
 36	struct folio *folio;
 37	struct page *page = damon_get_page(PHYS_PFN(paddr));
 38	struct rmap_walk_control rwc = {
 39		.rmap_one = __damon_pa_mkold,
 40		.anon_lock = folio_lock_anon_vma_read,
 41	};
 42	bool need_lock;
 43
 44	if (!page)
 45		return;
 46	folio = page_folio(page);
 47
 48	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
 49		folio_set_idle(folio);
 50		goto out;
 51	}
 52
 53	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
 54	if (need_lock && !folio_trylock(folio))
 55		goto out;
 56
 57	rmap_walk(folio, &rwc);
 58
 59	if (need_lock)
 60		folio_unlock(folio);
 61
 62out:
 63	folio_put(folio);
 64}
 65
 66static void __damon_pa_prepare_access_check(struct damon_region *r)
 67{
 68	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
 69
 70	damon_pa_mkold(r->sampling_addr);
 71}
 72
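/*
 * Prepare the next sampling round: pick a random address inside each region
 * of each monitoring target and make the page containing it 'old'.
 */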
 73static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
 74{
 75	struct damon_target *t;
 76	struct damon_region *r;
 77
 78	damon_for_each_target(t, ctx) {
 79		damon_for_each_region(r, t)
 80			__damon_pa_prepare_access_check(r);
 81	}
 82}
 83
 84struct damon_pa_access_chk_result {
 85	unsigned long page_sz;
 86	bool accessed;
 87};
 88
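/*
 * rmap_one() callback for damon_pa_young(): report the folio as accessed if
 * any mapping has a set accessed bit, the folio is not page-idle, or an MMU
 * notifier reports it young.  The mapping size (base page or PMD-mapped huge
 * page) is recorded so the caller knows the granularity of the result.
 */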
 89static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
 90		unsigned long addr, void *arg)
 91{
 92	struct damon_pa_access_chk_result *result = arg;
 93	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
 94
 95	result->accessed = false;
 96	result->page_sz = PAGE_SIZE;
 97	while (page_vma_mapped_walk(&pvmw)) {
 98		addr = pvmw.address;
 99		if (pvmw.pte) {
100			result->accessed = pte_young(*pvmw.pte) ||
101				!folio_test_idle(folio) ||
102				mmu_notifier_test_young(vma->vm_mm, addr);
103		} else {
104#ifdef CONFIG_TRANSPARENT_HUGEPAGE
105			result->accessed = pmd_young(*pvmw.pmd) ||
106				!folio_test_idle(folio) ||
107				mmu_notifier_test_young(vma->vm_mm, addr);
108			result->page_sz = HPAGE_PMD_SIZE;
109#else
110			WARN_ON_ONCE(1);
111#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
112		}
113		if (result->accessed) {
114			page_vma_mapped_walk_done(&pvmw);
115			break;
116		}
117	}
118
119	/* If accessed, stop walking */
120	return !result->accessed;
121}
122
123static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
124{
125	struct folio *folio;
126	struct page *page = damon_get_page(PHYS_PFN(paddr));
127	struct damon_pa_access_chk_result result = {
128		.page_sz = PAGE_SIZE,
129		.accessed = false,
130	};
131	struct rmap_walk_control rwc = {
132		.arg = &result,
133		.rmap_one = __damon_pa_young,
134		.anon_lock = folio_lock_anon_vma_read,
135	};
136	bool need_lock;
137
138	if (!page)
139		return false;
140	folio = page_folio(page);
141
142	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
143		if (folio_test_idle(folio))
144			result.accessed = false;
145		else
146			result.accessed = true;
147		folio_put(folio);
148		goto out;
149	}
150
151	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
152	if (need_lock && !folio_trylock(folio)) {
153		folio_put(folio);
154		return false;
155	}
156
157	rmap_walk(folio, &rwc);
158
159	if (need_lock)
160		folio_unlock(folio);
161	folio_put(folio);
162
163out:
164	*page_sz = result.page_sz;
165	return result.accessed;
166}
167
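/*
 * Check whether the sampling address of @r was accessed since the previous
 * prepare step.  The last result is cached per checked page, so neighboring
 * regions whose sampling addresses fall into the same (possibly huge) page
 * reuse it instead of repeating the rmap walk.
 */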
168static void __damon_pa_check_access(struct damon_region *r)
169{
170	static unsigned long last_addr;
171	static unsigned long last_page_sz = PAGE_SIZE;
172	static bool last_accessed;
173
174	/* If the region is in the last checked page, reuse the result */
175	if (ALIGN_DOWN(last_addr, last_page_sz) ==
176				ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
177		if (last_accessed)
178			r->nr_accesses++;
179		return;
180	}
181
182	last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
183	if (last_accessed)
184		r->nr_accesses++;
185
186	last_addr = r->sampling_addr;
187}
188
189static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
190{
191	struct damon_target *t;
192	struct damon_region *r;
193	unsigned int max_nr_accesses = 0;
194
195	damon_for_each_target(t, ctx) {
196		damon_for_each_region(r, t) {
197			__damon_pa_check_access(r);
198			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
199		}
200	}
201
202	return max_nr_accesses;
203}
204
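/*
 * Apply DAMOS_PAGEOUT to @r: drop the reference hints of every page in the
 * region, isolate the evictable ones onto a local list and reclaim them with
 * reclaim_pages().  Returns the reclaimed amount in bytes.
 */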
205static unsigned long damon_pa_pageout(struct damon_region *r)
206{
207	unsigned long addr, applied;
208	LIST_HEAD(page_list);
209
210	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
211		struct page *page = damon_get_page(PHYS_PFN(addr));
212
213		if (!page)
214			continue;
215
216		ClearPageReferenced(page);
217		test_and_clear_page_young(page);
218		if (isolate_lru_page(page)) {
219			put_page(page);
220			continue;
221		}
222		if (PageUnevictable(page)) {
223			putback_lru_page(page);
224		} else {
225			list_add(&page->lru, &page_list);
226			put_page(page);
227		}
228	}
229	applied = reclaim_pages(&page_list);
230	cond_resched();
231	return applied * PAGE_SIZE;
232}
233
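/*
 * Common helper for DAMOS_LRU_PRIO and DAMOS_LRU_DEPRIO: walk the region page
 * by page and either mark each page accessed (promoting it toward the active
 * LRU list) or deactivate it.  Returns the number of bytes the action was
 * applied to.
 */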
234static inline unsigned long damon_pa_mark_accessed_or_deactivate(
235		struct damon_region *r, bool mark_accessed)
236{
237	unsigned long addr, applied = 0;
238
239	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
240		struct page *page = damon_get_page(PHYS_PFN(addr));
241
242		if (!page)
243			continue;
244		if (mark_accessed)
245			mark_page_accessed(page);
246		else
247			deactivate_page(page);
248		put_page(page);
249		applied++;
250	}
251	return applied * PAGE_SIZE;
252}
253
254static unsigned long damon_pa_mark_accessed(struct damon_region *r)
255{
256	return damon_pa_mark_accessed_or_deactivate(r, true);
257}
258
259static unsigned long damon_pa_deactivate_pages(struct damon_region *r)
260{
261	return damon_pa_mark_accessed_or_deactivate(r, false);
262}
263
264static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
265		struct damon_target *t, struct damon_region *r,
266		struct damos *scheme)
267{
268	switch (scheme->action) {
269	case DAMOS_PAGEOUT:
270		return damon_pa_pageout(r);
271	case DAMOS_LRU_PRIO:
272		return damon_pa_mark_accessed(r);
273	case DAMOS_LRU_DEPRIO:
274		return damon_pa_deactivate_pages(r);
275	case DAMOS_STAT:
276		break;
277	default:
278		/* DAMOS actions that are not yet supported by 'paddr'. */
279		break;
280	}
281	return 0;
282}
283
284static int damon_pa_scheme_score(struct damon_ctx *context,
285		struct damon_target *t, struct damon_region *r,
286		struct damos *scheme)
287{
288	switch (scheme->action) {
289	case DAMOS_PAGEOUT:
290		return damon_cold_score(context, r, scheme);
291	case DAMOS_LRU_PRIO:
292		return damon_hot_score(context, r, scheme);
293	case DAMOS_LRU_DEPRIO:
294		return damon_cold_score(context, r, scheme);
295	default:
296		break;
297	}
298
299	return DAMOS_MAX_SCORE;
300}
301
302static int __init damon_pa_initcall(void)
303{
304	struct damon_operations ops = {
305		.id = DAMON_OPS_PADDR,
306		.init = NULL,
307		.update = NULL,
308		.prepare_access_checks = damon_pa_prepare_access_checks,
309		.check_accesses = damon_pa_check_accesses,
310		.reset_aggregated = NULL,
311		.target_valid = NULL,
312		.cleanup = NULL,
313		.apply_scheme = damon_pa_apply_scheme,
314		.get_scheme_score = damon_pa_scheme_score,
315	};
316
317	return damon_register_ops(&ops);
318};
319
320subsys_initcall(damon_pa_initcall);
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * DAMON Primitives for The Physical Address Space
  4 *
  5 * Author: SeongJae Park <sj@kernel.org>
  6 */
  7
  8#define pr_fmt(fmt) "damon-pa: " fmt
  9
 10#include <linux/mmu_notifier.h>
 11#include <linux/page_idle.h>
 12#include <linux/pagemap.h>
 13#include <linux/rmap.h>
 14#include <linux/swap.h>
 15#include <linux/memory-tiers.h>
 16#include <linux/migrate.h>
 17#include <linux/mm_inline.h>
 18
 19#include "../internal.h"
 20#include "ops-common.h"
 21
 22static bool damon_folio_mkold_one(struct folio *folio,
 23		struct vm_area_struct *vma, unsigned long addr, void *arg)
 24{
 25	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
 26
 27	while (page_vma_mapped_walk(&pvmw)) {
 28		addr = pvmw.address;
 29		if (pvmw.pte)
 30			damon_ptep_mkold(pvmw.pte, vma, addr);
 31		else
 32			damon_pmdp_mkold(pvmw.pmd, vma, addr);
 33	}
 34	return true;
 35}
 36
 37static void damon_folio_mkold(struct folio *folio)
 38{
 39	struct rmap_walk_control rwc = {
 40		.rmap_one = damon_folio_mkold_one,
 41		.anon_lock = folio_lock_anon_vma_read,
 42	};
 43	bool need_lock;
 44
 45	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
 46		folio_set_idle(folio);
 47		return;
 48	}
 49
 50	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
 51	if (need_lock && !folio_trylock(folio))
 52		return;
 53
 54	rmap_walk(folio, &rwc);
 55
 56	if (need_lock)
 57		folio_unlock(folio);
 58
 59}
 60
 61static void damon_pa_mkold(unsigned long paddr)
 62{
 63	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
 64
 65	if (!folio)
 66		return;
 67
 68	damon_folio_mkold(folio);
 69	folio_put(folio);
 70}
 71
 72static void __damon_pa_prepare_access_check(struct damon_region *r)
 73{
 74	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
 75
 76	damon_pa_mkold(r->sampling_addr);
 77}
 78
 79static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
 80{
 81	struct damon_target *t;
 82	struct damon_region *r;
 83
 84	damon_for_each_target(t, ctx) {
 85		damon_for_each_region(r, t)
 86			__damon_pa_prepare_access_check(r);
 87	}
 88}
 89
 90static bool damon_folio_young_one(struct folio *folio,
 91		struct vm_area_struct *vma, unsigned long addr, void *arg)
 92{
 93	bool *accessed = arg;
 94	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
 95
 96	*accessed = false;
 97	while (page_vma_mapped_walk(&pvmw)) {
 98		addr = pvmw.address;
 99		if (pvmw.pte) {
100			*accessed = pte_young(ptep_get(pvmw.pte)) ||
101				!folio_test_idle(folio) ||
102				mmu_notifier_test_young(vma->vm_mm, addr);
103		} else {
104#ifdef CONFIG_TRANSPARENT_HUGEPAGE
105			*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
106				!folio_test_idle(folio) ||
107				mmu_notifier_test_young(vma->vm_mm, addr);
108#else
109			WARN_ON_ONCE(1);
110#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
111		}
112		if (*accessed) {
113			page_vma_mapped_walk_done(&pvmw);
114			break;
115		}
116	}
117
118	/* If accessed, stop walking */
119	return *accessed == false;
120}
121
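/*
 * Check whether @folio has been accessed since it was last made 'old', based
 * on the page table accessed bits, the page-idle flag and MMU notifiers.  For
 * unmapped folios only the idle flag is consulted.
 */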
122static bool damon_folio_young(struct folio *folio)
123{
124	bool accessed = false;
125	struct rmap_walk_control rwc = {
126		.arg = &accessed,
127		.rmap_one = damon_folio_young_one,
128		.anon_lock = folio_lock_anon_vma_read,
129	};
130	bool need_lock;
131
132	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
133		if (folio_test_idle(folio))
134			return false;
135		else
136			return true;
137	}
138
139	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
140	if (need_lock && !folio_trylock(folio))
141		return false;
142
143	rmap_walk(folio, &rwc);
144
145	if (need_lock)
146		folio_unlock(folio);
147
148	return accessed;
149}
150
151static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
152{
153	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
154	bool accessed;
155
156	if (!folio)
157		return false;
158
159	accessed = damon_folio_young(folio);
160	*folio_sz = folio_size(folio);
161	folio_put(folio);
162	return accessed;
163}
164
165static void __damon_pa_check_access(struct damon_region *r,
166		struct damon_attrs *attrs)
167{
168	static unsigned long last_addr;
169	static unsigned long last_folio_sz = PAGE_SIZE;
170	static bool last_accessed;
171
172	/* If the region is in the last checked page, reuse the result */
173	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
174				ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
175		damon_update_region_access_rate(r, last_accessed, attrs);
176		return;
177	}
178
179	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
180	damon_update_region_access_rate(r, last_accessed, attrs);
181
182	last_addr = r->sampling_addr;
183}
184
185static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
186{
187	struct damon_target *t;
188	struct damon_region *r;
189	unsigned int max_nr_accesses = 0;
190
191	damon_for_each_target(t, ctx) {
192		damon_for_each_region(r, t) {
193			__damon_pa_check_access(r, &ctx->attrs);
194			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
195		}
196	}
197
198	return max_nr_accesses;
199}
200
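/*
 * Check @folio against a single DAMOS filter: anonymous folios, folios
 * charged to a specific memcg, or folios found young (whose accessed bits are
 * then cleared again).  Returns true if the match result equals the filter's
 * 'matching' setting, i.e. this filter wants the folio filtered out.
 */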
201static bool __damos_pa_filter_out(struct damos_filter *filter,
202		struct folio *folio)
203{
204	bool matched = false;
205	struct mem_cgroup *memcg;
206
207	switch (filter->type) {
208	case DAMOS_FILTER_TYPE_ANON:
209		matched = folio_test_anon(folio);
210		break;
211	case DAMOS_FILTER_TYPE_MEMCG:
212		rcu_read_lock();
213		memcg = folio_memcg_check(folio);
214		if (!memcg)
215			matched = false;
216		else
217			matched = filter->memcg_id == mem_cgroup_id(memcg);
218		rcu_read_unlock();
219		break;
220	case DAMOS_FILTER_TYPE_YOUNG:
221		matched = damon_folio_young(folio);
222		if (matched)
223			damon_folio_mkold(folio);
224		break;
225	default:
226		break;
227	}
228
229	return matched == filter->matching;
230}
231
232/*
233 * damos_pa_filter_out - Return true if the page should be filtered out.
234 */
235static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
236{
237	struct damos_filter *filter;
238
239	damos_for_each_filter(filter, scheme) {
240		if (__damos_pa_filter_out(filter, folio))
241			return true;
242	}
243	return false;
244}
245
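/*
 * Apply DAMOS_PAGEOUT to @r: unless the scheme already has a 'young' filter,
 * temporarily install one so that folios found accessed at page granularity
 * are skipped, then isolate the remaining evictable folios and reclaim them.
 * Returns the reclaimed amount in bytes.
 */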
246static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
247{
248	unsigned long addr, applied;
249	LIST_HEAD(folio_list);
250	bool install_young_filter = true;
251	struct damos_filter *filter;
252
253	/* check access in page level again by default */
254	damos_for_each_filter(filter, s) {
255		if (filter->type == DAMOS_FILTER_TYPE_YOUNG) {
256			install_young_filter = false;
257			break;
258		}
259	}
260	if (install_young_filter) {
261		filter = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, true);
262		if (!filter)
263			return 0;
264		damos_add_filter(s, filter);
265	}
266
267	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
268		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
269
270		if (!folio)
271			continue;
272
273		if (damos_pa_filter_out(s, folio))
274			goto put_folio;
275
276		folio_clear_referenced(folio);
277		folio_test_clear_young(folio);
278		if (!folio_isolate_lru(folio))
279			goto put_folio;
280		if (folio_test_unevictable(folio))
281			folio_putback_lru(folio);
282		else
283			list_add(&folio->lru, &folio_list);
284put_folio:
285		folio_put(folio);
286	}
287	if (install_young_filter)
288		damos_destroy_filter(filter);
289	applied = reclaim_pages(&folio_list);
290	cond_resched();
291	return applied * PAGE_SIZE;
292}
293
294static inline unsigned long damon_pa_mark_accessed_or_deactivate(
295		struct damon_region *r, struct damos *s, bool mark_accessed)
296{
297	unsigned long addr, applied = 0;
298
299	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
300		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
301
302		if (!folio)
303			continue;
304
305		if (damos_pa_filter_out(s, folio))
306			goto put_folio;
307
308		if (mark_accessed)
309			folio_mark_accessed(folio);
310		else
311			folio_deactivate(folio);
312		applied += folio_nr_pages(folio);
313put_folio:
314		folio_put(folio);
315	}
316	return applied * PAGE_SIZE;
317}
318
319static unsigned long damon_pa_mark_accessed(struct damon_region *r,
320	struct damos *s)
321{
322	return damon_pa_mark_accessed_or_deactivate(r, s, true);
323}
324
325static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
326	struct damos *s)
327{
328	return damon_pa_mark_accessed_or_deactivate(r, s, false);
329}
330
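/*
 * Migrate the folios on @migrate_folios, which all come from @pgdat, to
 * @target_nid using a light-weight allocation that neither reclaims nor warns
 * on failure.  Returns the number of migrations that succeeded, as reported
 * by migrate_pages().
 */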
331static unsigned int __damon_pa_migrate_folio_list(
332		struct list_head *migrate_folios, struct pglist_data *pgdat,
333		int target_nid)
334{
335	unsigned int nr_succeeded = 0;
336	nodemask_t allowed_mask = NODE_MASK_NONE;
337	struct migration_target_control mtc = {
338		/*
339		 * Allocate from 'node', or fail quickly and quietly.
340		 * When this happens, 'page' will likely just be discarded
341		 * instead of migrated.
342		 */
343		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
344			__GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
345		.nid = target_nid,
346		.nmask = &allowed_mask
347	};
348
349	if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE)
350		return 0;
351
352	if (list_empty(migrate_folios))
353		return 0;
354
355	/* Migration ignores all cpuset and mempolicy settings */
356	migrate_pages(migrate_folios, alloc_migrate_folio, NULL,
357		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DAMON,
358		      &nr_succeeded);
359
360	return nr_succeeded;
361}
362
363static unsigned int damon_pa_migrate_folio_list(struct list_head *folio_list,
364						struct pglist_data *pgdat,
365						int target_nid)
366{
367	unsigned int nr_migrated = 0;
368	struct folio *folio;
369	LIST_HEAD(ret_folios);
370	LIST_HEAD(migrate_folios);
371
372	while (!list_empty(folio_list)) {
373		struct folio *folio;
374
375		cond_resched();
376
377		folio = lru_to_folio(folio_list);
378		list_del(&folio->lru);
379
380		if (!folio_trylock(folio))
381			goto keep;
382
383		/* Relocate its contents to another node. */
384		list_add(&folio->lru, &migrate_folios);
385		folio_unlock(folio);
386		continue;
387keep:
388		list_add(&folio->lru, &ret_folios);
389	}
390	/* 'folio_list' is always empty here */
391
392	/* Migrate folios selected for migration */
393	nr_migrated += __damon_pa_migrate_folio_list(
394			&migrate_folios, pgdat, target_nid);
395	/*
396	 * Folios that could not be migrated are still in @migrate_folios.  Add
397	 * those back on @folio_list
398	 */
399	if (!list_empty(&migrate_folios))
400		list_splice_init(&migrate_folios, folio_list);
401
402	try_to_unmap_flush();
403
404	list_splice(&ret_folios, folio_list);
405
406	while (!list_empty(folio_list)) {
407		folio = lru_to_folio(folio_list);
408		list_del(&folio->lru);
409		folio_putback_lru(folio);
410	}
411
412	return nr_migrated;
413}
414
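/*
 * Migrate an arbitrary list of folios to @target_nid.  The list is processed
 * in per-source-node batches so that each batch can be passed to
 * damon_pa_migrate_folio_list() together with its pglist_data.  Runs with
 * reclaim disabled via memalloc_noreclaim_save().
 */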
415static unsigned long damon_pa_migrate_pages(struct list_head *folio_list,
416					    int target_nid)
417{
418	int nid;
419	unsigned long nr_migrated = 0;
420	LIST_HEAD(node_folio_list);
421	unsigned int noreclaim_flag;
422
423	if (list_empty(folio_list))
424		return nr_migrated;
425
426	noreclaim_flag = memalloc_noreclaim_save();
427
428	nid = folio_nid(lru_to_folio(folio_list));
429	do {
430		struct folio *folio = lru_to_folio(folio_list);
431
432		if (nid == folio_nid(folio)) {
433			list_move(&folio->lru, &node_folio_list);
434			continue;
435		}
436
437		nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
438							   NODE_DATA(nid),
439							   target_nid);
440		nid = folio_nid(lru_to_folio(folio_list));
441	} while (!list_empty(folio_list));
442
443	nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
444						   NODE_DATA(nid),
445						   target_nid);
446
447	memalloc_noreclaim_restore(noreclaim_flag);
448
449	return nr_migrated;
450}
451
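/*
 * Apply DAMOS_MIGRATE_HOT or DAMOS_MIGRATE_COLD to @r: isolate the folios of
 * the region that pass the scheme's filters and migrate them to the scheme's
 * target node.  Returns the applied amount in bytes.
 */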
452static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s)
453{
454	unsigned long addr, applied;
455	LIST_HEAD(folio_list);
456
457	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
458		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
459
460		if (!folio)
461			continue;
462
463		if (damos_pa_filter_out(s, folio))
464			goto put_folio;
465
466		if (!folio_isolate_lru(folio))
467			goto put_folio;
468		list_add(&folio->lru, &folio_list);
469put_folio:
470		folio_put(folio);
471	}
472	applied = damon_pa_migrate_pages(&folio_list, s->target_nid);
473	cond_resched();
474	return applied * PAGE_SIZE;
475}
476
477
478static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
479		struct damon_target *t, struct damon_region *r,
480		struct damos *scheme)
481{
482	switch (scheme->action) {
483	case DAMOS_PAGEOUT:
484		return damon_pa_pageout(r, scheme);
485	case DAMOS_LRU_PRIO:
486		return damon_pa_mark_accessed(r, scheme);
487	case DAMOS_LRU_DEPRIO:
488		return damon_pa_deactivate_pages(r, scheme);
489	case DAMOS_MIGRATE_HOT:
490	case DAMOS_MIGRATE_COLD:
491		return damon_pa_migrate(r, scheme);
492	case DAMOS_STAT:
493		break;
494	default:
495		/* DAMOS actions that are not yet supported by 'paddr'. */
496		break;
497	}
498	return 0;
499}
500
501static int damon_pa_scheme_score(struct damon_ctx *context,
502		struct damon_target *t, struct damon_region *r,
503		struct damos *scheme)
504{
505	switch (scheme->action) {
506	case DAMOS_PAGEOUT:
507		return damon_cold_score(context, r, scheme);
508	case DAMOS_LRU_PRIO:
509		return damon_hot_score(context, r, scheme);
510	case DAMOS_LRU_DEPRIO:
511		return damon_cold_score(context, r, scheme);
512	case DAMOS_MIGRATE_HOT:
513		return damon_hot_score(context, r, scheme);
514	case DAMOS_MIGRATE_COLD:
515		return damon_cold_score(context, r, scheme);
516	default:
517		break;
518	}
519
520	return DAMOS_MAX_SCORE;
521}
522
523static int __init damon_pa_initcall(void)
524{
525	struct damon_operations ops = {
526		.id = DAMON_OPS_PADDR,
527		.init = NULL,
528		.update = NULL,
529		.prepare_access_checks = damon_pa_prepare_access_checks,
530		.check_accesses = damon_pa_check_accesses,
531		.reset_aggregated = NULL,
532		.target_valid = NULL,
533		.cleanup = NULL,
534		.apply_scheme = damon_pa_apply_scheme,
535		.get_scheme_score = damon_pa_scheme_score,
536	};
537
538	return damon_register_ops(&ops);
539};
540
541subsys_initcall(damon_pa_initcall);