mm/page_idle.c
v4.6
 
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/page_ext.h>
#include <linux/page_idle.h>

#define BITMAP_CHUNK_SIZE       sizeof(u64)
#define BITMAP_CHUNK_BITS       (BITMAP_CHUNK_SIZE * BITS_PER_BYTE)

/*
 * Idle page tracking only considers user memory pages; for other types of
 * pages the idle flag is always unset and an attempt to set it is silently
 * ignored.
 *
 * We treat a page as a user memory page if it is on an LRU list, because it is
 * always safe to pass such a page to rmap_walk(), which is essential for idle
 * page tracking. With such an indicator of user pages we can skip isolated
 * pages, but since there are not usually many of them, it will hardly affect
 * the overall result.
 *
 * This function tries to get a user memory page by pfn as described above.
 */
static struct page *page_idle_get_page(unsigned long pfn)
{
        struct page *page;
        struct zone *zone;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_page(pfn);
        if (!page || !PageLRU(page) ||
            !get_page_unless_zero(page))
                return NULL;

        zone = page_zone(page);
        spin_lock_irq(&zone->lru_lock);
        if (unlikely(!PageLRU(page))) {
                put_page(page);
                page = NULL;
        }
        spin_unlock_irq(&zone->lru_lock);
        return page;
}

static int page_idle_clear_pte_refs_one(struct page *page,
                                        struct vm_area_struct *vma,
                                        unsigned long addr, void *arg)
{
        struct mm_struct *mm = vma->vm_mm;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;
        bool referenced = false;

        if (!page_check_address_transhuge(page, mm, addr, &pmd, &pte, &ptl))
                return SWAP_AGAIN;

        if (pte) {
                referenced = ptep_clear_young_notify(vma, addr, pte);
                pte_unmap(pte);
        } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
                referenced = pmdp_clear_young_notify(vma, addr, pmd);
        } else {
                /* unexpected pmd-mapped page? */
                WARN_ON_ONCE(1);
        }

        spin_unlock(ptl);

        if (referenced) {
                clear_page_idle(page);
                /*
                 * We cleared the referenced bit in a mapping to this page. To
                 * avoid interference with page reclaim, mark it young so that
                 * page_referenced() will return > 0.
                 */
                set_page_young(page);
        }
        return SWAP_AGAIN;
}

static void page_idle_clear_pte_refs(struct page *page)
{
        /*
         * Since rwc.arg is unused, rwc is effectively immutable, so we
         * can make it static const to save some cycles and stack.
         */
        static const struct rmap_walk_control rwc = {
                .rmap_one = page_idle_clear_pte_refs_one,
                .anon_lock = page_lock_anon_vma_read,
        };
        bool need_lock;

        if (!page_mapped(page) ||
            !page_rmapping(page))
                return;

        need_lock = !PageAnon(page) || PageKsm(page);
        if (need_lock && !trylock_page(page))
                return;

        rmap_walk(page, (struct rmap_walk_control *)&rwc);

        if (need_lock)
                unlock_page(page);
}

static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
                                     struct bin_attribute *attr, char *buf,
                                     loff_t pos, size_t count)
{
        u64 *out = (u64 *)buf;
        struct page *page;
        unsigned long pfn, end_pfn;
        int bit;

        if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
                return -EINVAL;

        pfn = pos * BITS_PER_BYTE;
        if (pfn >= max_pfn)
                return 0;

        end_pfn = pfn + count * BITS_PER_BYTE;
        if (end_pfn > max_pfn)
                end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);

        for (; pfn < end_pfn; pfn++) {
                bit = pfn % BITMAP_CHUNK_BITS;
                if (!bit)
                        *out = 0ULL;
                page = page_idle_get_page(pfn);
                if (page) {
                        if (page_is_idle(page)) {
                                /*
                                 * The page might have been referenced via a
                                 * pte, in which case it is not idle. Clear
                                 * refs and recheck.
                                 */
                                page_idle_clear_pte_refs(page);
                                if (page_is_idle(page))
                                        *out |= 1ULL << bit;
                        }
                        put_page(page);
                }
                if (bit == BITMAP_CHUNK_BITS - 1)
                        out++;
                cond_resched();
        }
        return (char *)out - buf;
}
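
The read handler above exposes the idle flags as an array of u64 chunks: bit (pfn % 64) of chunk (pfn / 64), with pos and count required to be multiples of 8 bytes. A minimal userspace sketch of the corresponding query, under these assumptions: the attribute is exposed at /sys/kernel/mm/page_idle/bitmap (as the group registration later in the file suggests), the caller has the privilege the S_IRUSR | S_IWUSR mode demands, and pfn_is_idle() is a hypothetical helper name, not part of any kernel or library API.

/* Userspace sketch, not part of the kernel source. */
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

/* Return 1 if the page at @pfn is idle, 0 if not, -1 on error. */
static int pfn_is_idle(unsigned long pfn)
{
        uint64_t chunk;
        off_t pos = pfn / 64 * sizeof(chunk);   /* chunk-aligned offset */
        int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDONLY);
        int ret = -1;

        if (fd < 0)
                return -1;
        if (pread(fd, &chunk, sizeof(chunk), pos) == sizeof(chunk))
                ret = (chunk >> (pfn % 64)) & 1;
        close(fd);
        return ret;
}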

static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
                                      struct bin_attribute *attr, char *buf,
                                      loff_t pos, size_t count)
{
        const u64 *in = (u64 *)buf;
        struct page *page;
        unsigned long pfn, end_pfn;
        int bit;

        if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
                return -EINVAL;

        pfn = pos * BITS_PER_BYTE;
        if (pfn >= max_pfn)
                return -ENXIO;

        end_pfn = pfn + count * BITS_PER_BYTE;
        if (end_pfn > max_pfn)
                end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);

        for (; pfn < end_pfn; pfn++) {
                bit = pfn % BITMAP_CHUNK_BITS;
                if ((*in >> bit) & 1) {
                        page = page_idle_get_page(pfn);
                        if (page) {
                                page_idle_clear_pte_refs(page);
                                set_page_idle(page);
                                put_page(page);
                        }
                }
                if (bit == BITMAP_CHUNK_BITS - 1)
                        in++;
                cond_resched();
        }
        return (char *)in - buf;
}
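
The write handler only acts on bits that are set; clear bits in a written chunk leave the corresponding pages untouched, so a single page can be marked idle by writing one chunk with one bit set. A hedged userspace counterpart, with the same assumed path and with pfn_set_idle() as a hypothetical helper name:

/* Userspace sketch, not part of the kernel source. */
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

/* Mark the page at @pfn idle; 0 on success, -1 on error. */
static int pfn_set_idle(unsigned long pfn)
{
        uint64_t chunk = 1ULL << (pfn % 64);    /* clear bits are ignored */
        off_t pos = pfn / 64 * sizeof(chunk);
        int fd = open("/sys/kernel/mm/page_idle/bitmap", O_WRONLY);
        int ret = -1;

        if (fd < 0)
                return -1;
        if (pwrite(fd, &chunk, sizeof(chunk), pos) == sizeof(chunk))
                ret = 0;
        close(fd);
        return ret;
}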

static struct bin_attribute page_idle_bitmap_attr =
                __BIN_ATTR(bitmap, S_IRUSR | S_IWUSR,
                           page_idle_bitmap_read, page_idle_bitmap_write, 0);

static struct bin_attribute *page_idle_bin_attrs[] = {
        &page_idle_bitmap_attr,
        NULL,
};

static struct attribute_group page_idle_attr_group = {
        .bin_attrs = page_idle_bin_attrs,
        .name = "page_idle",
};

#ifndef CONFIG_64BIT
static bool need_page_idle(void)
{
        return true;
}
struct page_ext_operations page_idle_ops = {
        .need = need_page_idle,
};
#endif

static int __init page_idle_init(void)
{
        int err;

        err = sysfs_create_group(mm_kobj, &page_idle_attr_group);
        if (err) {
                pr_err("page_idle: register sysfs failed\n");
                return err;
        }
        return 0;
}
subsys_initcall(page_idle_init);
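
Since the initcall registers the group on mm_kobj, the bitmap appears as /sys/kernel/mm/page_idle/bitmap. It is indexed by PFN, so the usual workflow (documented in Documentation/vm/idle_page_tracking.txt) pairs it with /proc/pid/pagemap to translate a virtual address first: bits 0-54 of a pagemap entry hold the PFN and bit 63 the present flag, with unprivileged readers seeing a zeroed PFN. A sketch of the full mark-run-recheck cycle, using the hypothetical pfn_set_idle()/pfn_is_idle() helpers from the sketches above; run as root:

/* Userspace sketch, not part of the kernel source. */
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

/* Translate a virtual address of this process to a PFN via pagemap. */
static unsigned long addr_to_pfn(const void *addr)
{
        uint64_t entry = 0;
        long psize = sysconf(_SC_PAGESIZE);
        off_t pos = (uintptr_t)addr / psize * sizeof(entry);
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0)
                return 0;
        if (pread(fd, &entry, sizeof(entry), pos) != sizeof(entry))
                entry = 0;
        close(fd);
        if (!(entry & (1ULL << 63)))            /* page not present */
                return 0;
        return entry & ((1ULL << 55) - 1);      /* bits 0-54: PFN */
}

int main(void)
{
        static char buf[4096];
        unsigned long pfn;

        buf[0] = 1;                             /* fault the page in */
        pfn = addr_to_pfn(buf);
        if (!pfn)
                return 1;
        pfn_set_idle(pfn);                      /* mark idle */
        buf[0] = 2;                             /* access it again */
        /* The access set the pte's accessed bit, so the read side should
         * clear the idle flag and report 0 here. */
        printf("idle after access: %d\n", pfn_is_idle(pfn));
        return 0;
}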
v5.9
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/page_ext.h>
#include <linux/page_idle.h>

#define BITMAP_CHUNK_SIZE       sizeof(u64)
#define BITMAP_CHUNK_BITS       (BITMAP_CHUNK_SIZE * BITS_PER_BYTE)

/*
 * Idle page tracking only considers user memory pages; for other types of
 * pages the idle flag is always unset and an attempt to set it is silently
 * ignored.
 *
 * We treat a page as a user memory page if it is on an LRU list, because it is
 * always safe to pass such a page to rmap_walk(), which is essential for idle
 * page tracking. With such an indicator of user pages we can skip isolated
 * pages, but since there are not usually many of them, it will hardly affect
 * the overall result.
 *
 * This function tries to get a user memory page by pfn as described above.
 */
static struct page *page_idle_get_page(unsigned long pfn)
{
        struct page *page = pfn_to_online_page(pfn);
        pg_data_t *pgdat;

        if (!page || !PageLRU(page) ||
            !get_page_unless_zero(page))
                return NULL;

        pgdat = page_pgdat(page);
        spin_lock_irq(&pgdat->lru_lock);
        if (unlikely(!PageLRU(page))) {
                put_page(page);
                page = NULL;
        }
        spin_unlock_irq(&pgdat->lru_lock);
        return page;
}

static bool page_idle_clear_pte_refs_one(struct page *page,
                                        struct vm_area_struct *vma,
                                        unsigned long addr, void *arg)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .address = addr,
        };
        bool referenced = false;

        while (page_vma_mapped_walk(&pvmw)) {
                addr = pvmw.address;
                if (pvmw.pte) {
                        /*
                         * For a PTE-mapped THP, if one sub page is
                         * referenced, the whole THP is referenced.
                         */
                        if (ptep_clear_young_notify(vma, addr, pvmw.pte))
                                referenced = true;
                } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
                        if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
                                referenced = true;
                } else {
                        /* unexpected pmd-mapped page? */
                        WARN_ON_ONCE(1);
                }
        }

        if (referenced) {
                clear_page_idle(page);
                /*
                 * We cleared the referenced bit in a mapping to this page. To
                 * avoid interference with page reclaim, mark it young so that
                 * page_referenced() will return > 0.
                 */
                set_page_young(page);
        }
        return true;
}

static void page_idle_clear_pte_refs(struct page *page)
{
        /*
         * Since rwc.arg is unused, rwc is effectively immutable, so we
         * can make it static const to save some cycles and stack.
         */
        static const struct rmap_walk_control rwc = {
                .rmap_one = page_idle_clear_pte_refs_one,
                .anon_lock = page_lock_anon_vma_read,
        };
        bool need_lock;

        if (!page_mapped(page) ||
            !page_rmapping(page))
                return;

        need_lock = !PageAnon(page) || PageKsm(page);
        if (need_lock && !trylock_page(page))
                return;

        rmap_walk(page, (struct rmap_walk_control *)&rwc);

        if (need_lock)
                unlock_page(page);
}

static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
                                     struct bin_attribute *attr, char *buf,
                                     loff_t pos, size_t count)
{
        u64 *out = (u64 *)buf;
        struct page *page;
        unsigned long pfn, end_pfn;
        int bit;

        if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
                return -EINVAL;

        pfn = pos * BITS_PER_BYTE;
        if (pfn >= max_pfn)
                return 0;

        end_pfn = pfn + count * BITS_PER_BYTE;
        if (end_pfn > max_pfn)
                end_pfn = max_pfn;

        for (; pfn < end_pfn; pfn++) {
                bit = pfn % BITMAP_CHUNK_BITS;
                if (!bit)
                        *out = 0ULL;
                page = page_idle_get_page(pfn);
                if (page) {
                        if (page_is_idle(page)) {
                                /*
                                 * The page might have been referenced via a
                                 * pte, in which case it is not idle. Clear
                                 * refs and recheck.
                                 */
                                page_idle_clear_pte_refs(page);
                                if (page_is_idle(page))
                                        *out |= 1ULL << bit;
                        }
                        put_page(page);
                }
                if (bit == BITMAP_CHUNK_BITS - 1)
                        out++;
                cond_resched();
        }
        return (char *)out - buf;
}

static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
                                      struct bin_attribute *attr, char *buf,
                                      loff_t pos, size_t count)
{
        const u64 *in = (u64 *)buf;
        struct page *page;
        unsigned long pfn, end_pfn;
        int bit;

        if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
                return -EINVAL;

        pfn = pos * BITS_PER_BYTE;
        if (pfn >= max_pfn)
                return -ENXIO;

        end_pfn = pfn + count * BITS_PER_BYTE;
        if (end_pfn > max_pfn)
                end_pfn = max_pfn;

        for (; pfn < end_pfn; pfn++) {
                bit = pfn % BITMAP_CHUNK_BITS;
                if ((*in >> bit) & 1) {
                        page = page_idle_get_page(pfn);
                        if (page) {
                                page_idle_clear_pte_refs(page);
                                set_page_idle(page);
                                put_page(page);
                        }
                }
                if (bit == BITMAP_CHUNK_BITS - 1)
                        in++;
                cond_resched();
        }
        return (char *)in - buf;
}
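
One difference worth noting between the two versions' read handlers: v4.6 rounds end_pfn up to a chunk boundary with ALIGN(), while v5.9 clamps it to max_pfn. Since out only advances when a chunk completes, a max_pfn that is not a multiple of 64 leaves the final partial chunk out of the v5.9 return value. Either way, a scanner can simply stream chunks until a short read, as in this userspace sketch (same assumed path as above; __builtin_popcountll is a GCC/Clang builtin):

/* Userspace sketch, not part of the kernel source. */
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

/* Count idle pages by streaming the whole bitmap; -1 on error. */
static long count_idle_pages(void)
{
        uint64_t chunk;
        long idle = 0;
        off_t pos = 0;
        int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDONLY);

        if (fd < 0)
                return -1;
        while (pread(fd, &chunk, sizeof(chunk), pos) == sizeof(chunk)) {
                idle += __builtin_popcountll(chunk);
                pos += sizeof(chunk);
        }
        close(fd);
        return idle;
}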

static struct bin_attribute page_idle_bitmap_attr =
                __BIN_ATTR(bitmap, 0600,
                           page_idle_bitmap_read, page_idle_bitmap_write, 0);

static struct bin_attribute *page_idle_bin_attrs[] = {
        &page_idle_bitmap_attr,
        NULL,
};

static const struct attribute_group page_idle_attr_group = {
        .bin_attrs = page_idle_bin_attrs,
        .name = "page_idle",
};

#ifndef CONFIG_64BIT
static bool need_page_idle(void)
{
        return true;
}
struct page_ext_operations page_idle_ops = {
        .need = need_page_idle,
};
#endif

static int __init page_idle_init(void)
{
        int err;

        err = sysfs_create_group(mm_kobj, &page_idle_attr_group);
        if (err) {
                pr_err("page_idle: register sysfs failed\n");
                return err;
        }
        return 0;
}
subsys_initcall(page_idle_init);