mm/damon/paddr.c
v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "ops-common.h"

static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
	}
	return true;
}

static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio;
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct rmap_walk_control rwc = {
		.rmap_one = __damon_pa_mkold,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return;
	folio = page_folio(page);

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	folio_put(folio);
}

static void __damon_pa_prepare_access_check(struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(r);
	}
}

struct damon_pa_access_chk_result {
	unsigned long page_sz;
	bool accessed;
};

static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	struct damon_pa_access_chk_result *result = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	result->accessed = false;
	result->page_sz = PAGE_SIZE;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			result->accessed = pte_young(*pvmw.pte) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			result->accessed = pmd_young(*pvmw.pmd) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
			result->page_sz = HPAGE_PMD_SIZE;
#else
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (result->accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return !result->accessed;
}

static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
{
	struct folio *folio;
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct damon_pa_access_chk_result result = {
		.page_sz = PAGE_SIZE,
		.accessed = false,
	};
	struct rmap_walk_control rwc = {
		.arg = &result,
		.rmap_one = __damon_pa_young,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return false;
	folio = page_folio(page);

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		if (folio_test_idle(folio))
			result.accessed = false;
		else
			result.accessed = true;
		folio_put(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio)) {
		folio_put(folio);
		return false;
	}

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
	folio_put(folio);

out:
	*page_sz = result.page_sz;
	return result.accessed;
}

static void __damon_pa_check_access(struct damon_region *r)
{
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_addr = r->sampling_addr;
}

static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

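A note on the cache above, since it is easy to miss: __damon_pa_check_access() keeps last_addr, last_page_sz, and last_accessed in function-local statics, so a single damon_pa_young() rmap walk can serve every region whose sampling address lands in the same page. A worked example of the ALIGN_DOWN() test, with hypothetical addresses and a 2MiB THP as the last checked page:

/*
 * Illustration only (addresses are hypothetical).  Suppose the
 * previous check landed in a 2MiB THP, so last_page_sz is
 * HPAGE_PMD_SIZE (0x200000) and last_addr is 0x2010000:
 *
 *   ALIGN_DOWN(0x2010000, 0x200000) == 0x2000000
 *   ALIGN_DOWN(0x21ff000, 0x200000) == 0x2000000  -> same page, reuse
 *   ALIGN_DOWN(0x2200000, 0x200000) == 0x2200000  -> new page, re-check
 *
 * Every region whose sampling address falls in that same huge page
 * shares one rmap walk per sampling interval.
 */
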
static unsigned long damon_pa_pageout(struct damon_region *r)
{
	unsigned long addr, applied;
	LIST_HEAD(page_list);

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (isolate_lru_page(page)) {
			put_page(page);
			continue;
		}
		if (PageUnevictable(page)) {
			putback_lru_page(page);
		} else {
			list_add(&page->lru, &page_list);
			put_page(page);
		}
	}
	applied = reclaim_pages(&page_list);
	cond_resched();
	return applied * PAGE_SIZE;
}

static inline unsigned long damon_pa_mark_accessed_or_deactivate(
		struct damon_region *r, bool mark_accessed)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;
		if (mark_accessed)
			mark_page_accessed(page);
		else
			deactivate_page(page);
		put_page(page);
		applied++;
	}
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r)
{
	return damon_pa_mark_accessed_or_deactivate(r, true);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r)
{
	return damon_pa_mark_accessed_or_deactivate(r, false);
}

static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r);
	case DAMOS_STAT:
		break;
	default:
		/* DAMOS actions that are not yet supported by 'paddr'. */
		break;
	}
	return 0;
}

static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
};

subsys_initcall(damon_pa_initcall);
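
How the callbacks registered above are consumed: the DAMON core's kdamond thread repeatedly calls prepare_access_checks() to clear the accessed bits for each region's sampling address, sleeps one sampling interval, then calls check_accesses() to see which addresses were touched. The following is a simplified sketch of that loop, not code from this file: the real loop lives in mm/damon/core.c, and should_stop() stands in for the core's actual termination checks.

#include <linux/damon.h>
#include <linux/delay.h>

/* Placeholder for the core's real termination checks. */
static bool should_stop(struct damon_ctx *ctx)
{
	return false;
}

/*
 * Illustrative sketch of the core's sampling loop.  The real core
 * also checks each hook for NULL before calling it; the paddr set
 * installs both hooks, so the direct calls are safe here.
 */
static int kdamond_sketch(struct damon_ctx *ctx)
{
	while (!should_stop(ctx)) {
		/* e.g. damon_pa_prepare_access_checks(): mark "old" */
		ctx->ops.prepare_access_checks(ctx);

		/* wait one sampling interval (microseconds) */
		usleep_range(ctx->attrs.sample_interval,
				ctx->attrs.sample_interval + 1);

		/* e.g. damon_pa_check_accesses(): test "young" */
		ctx->ops.check_accesses(ctx);
	}
	return 0;
}
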
v6.9.4
// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "ops-common.h"

static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma, addr);
	}
	return true;
}

static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	struct rmap_walk_control rwc = {
		.rmap_one = __damon_pa_mkold,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio)
		return;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	folio_put(folio);
}

static void __damon_pa_prepare_access_check(struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(r);
	}
}

static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	bool *accessed = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	*accessed = false;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			*accessed = pte_young(ptep_get(pvmw.pte)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
#else
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (*accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return *accessed == false;
}

static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	bool accessed = false;
	struct rmap_walk_control rwc = {
		.arg = &accessed,
		.rmap_one = __damon_pa_young,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio)
		return false;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		if (folio_test_idle(folio))
			accessed = false;
		else
			accessed = true;
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	*folio_sz = folio_size(folio);
	folio_put(folio);
	return accessed;
}

static void __damon_pa_check_access(struct damon_region *r,
		struct damon_attrs *attrs)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = r->sampling_addr;
}

static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(r, &ctx->attrs);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

static bool __damos_pa_filter_out(struct damos_filter *filter,
		struct folio *folio)
{
	bool matched = false;
	struct mem_cgroup *memcg;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_ANON:
		matched = folio_test_anon(folio);
		break;
	case DAMOS_FILTER_TYPE_MEMCG:
		rcu_read_lock();
		memcg = folio_memcg_check(folio);
		if (!memcg)
			matched = false;
		else
			matched = filter->memcg_id == mem_cgroup_id(memcg);
		rcu_read_unlock();
		break;
	default:
		break;
	}

	return matched == filter->matching;
}

/*
 * damos_pa_filter_out - Return true if the page should be filtered out.
 */
static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, scheme) {
		if (__damos_pa_filter_out(filter, folio))
			return true;
	}
	return false;
}

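These two filter helpers are new relative to v6.2, and the matched == filter->matching return is the subtle part: filter->matching selects whether a folio that matches the filter type is the one to skip or the one to keep. A truth table for a hypothetical anon-type filter:

/*
 * Illustration of __damos_pa_filter_out() for a hypothetical filter
 * with type == DAMOS_FILTER_TYPE_ANON:
 *
 *   folio anon, filter->matching == true   -> true  (filtered out)
 *   folio anon, filter->matching == false  -> false (kept)
 *   folio file, filter->matching == true   -> false (kept)
 *   folio file, filter->matching == false  -> true  (filtered out)
 *
 * damos_pa_filter_out() ORs this across the scheme's filters: the
 * first filter that reports "out" stops the walk.
 */
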
static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (!folio_isolate_lru(folio))
			goto put_folio;
		if (folio_test_unevictable(folio))
			folio_putback_lru(folio);
		else
			list_add(&folio->lru, &folio_list);
put_folio:
		folio_put(folio);
	}
	applied = reclaim_pages(&folio_list, false);
	cond_resched();
	return applied * PAGE_SIZE;
}

static inline unsigned long damon_pa_mark_accessed_or_deactivate(
		struct damon_region *r, struct damos *s, bool mark_accessed)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;

		if (mark_accessed)
			folio_mark_accessed(folio);
		else
			folio_deactivate(folio);
		applied += folio_nr_pages(folio);
put_folio:
		folio_put(folio);
	}
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r,
	struct damos *s)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, true);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
	struct damos *s)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, false);
}

static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r, scheme);
	case DAMOS_STAT:
		break;
	default:
		/* DAMOS actions that are not yet supported by 'paddr'. */
		break;
	}
	return 0;
}

static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
};

subsys_initcall(damon_pa_initcall);
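
The initcall above is the entire registration story: an operations set is a damon_operations struct keyed by an id from enum damon_ops_id, with unused hooks left NULL. As a hedged sketch (not upstream code), a hypothetical second registration would look like the following; note that damon_register_ops() returns -EINVAL for an id that is invalid or already registered, so a real variant would need its own enum slot rather than DAMON_OPS_PADDR.

/*
 * Hypothetical sketch of registering another operations set, modeled
 * on damon_pa_initcall() above.  my_ops_initcall() and the my_*
 * hooks are invented names; DAMON_OPS_PADDR is already claimed by
 * this file, so damon_register_ops() would fail here with -EINVAL.
 */
#include <linux/damon.h>
#include <linux/init.h>

static void my_prepare_access_checks(struct damon_ctx *ctx) { /* ... */ }
static unsigned int my_check_accesses(struct damon_ctx *ctx) { return 0; }

static int __init my_ops_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,	/* placeholder id, see comment above */
		.prepare_access_checks = my_prepare_access_checks,
		.check_accesses = my_check_accesses,
		/* remaining hooks left NULL, as the paddr set leaves most */
	};

	return damon_register_ops(&ops);
}
subsys_initcall(my_ops_initcall);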