arch/riscv/mm/pageattr.c, v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */
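
/*
 * Change the attributes (read/write/execute/present) of existing kernel
 * mappings by walking the kernel page tables with the generic
 * <linux/pagewalk.h> walker.
 */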

#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/set_memory.h>

struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
	struct pageattr_masks *masks = walk->private;
	unsigned long new_val = val;

	new_val &= ~(pgprot_val(masks->clear_mask));
	new_val |= (pgprot_val(masks->set_mask));

	return new_val;
}

static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pgd_t val = READ_ONCE(*pgd);

	if (pgd_leaf(val)) {
		val = __pgd(set_pageattr_masks(pgd_val(val), walk));
		set_pgd(pgd, val);
	}

	return 0;
}

static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	p4d_t val = READ_ONCE(*p4d);

	if (p4d_leaf(val)) {
		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
		set_p4d(p4d, val);
	}

	return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pud_t val = READ_ONCE(*pud);

	if (pud_leaf(val)) {
		val = __pud(set_pageattr_masks(pud_val(val), walk));
		set_pud(pud, val);
	}

	return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pmd_t val = READ_ONCE(*pmd);

	if (pmd_leaf(val)) {
		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
		set_pmd(pmd, val);
	}

	return 0;
}

static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t val = READ_ONCE(*pte);

	val = __pte(set_pageattr_masks(pte_val(val), walk));
	set_pte(pte, val);

	return 0;
}

static int pageattr_pte_hole(unsigned long addr, unsigned long next,
			     int depth, struct mm_walk *walk)
{
	/* Nothing to do here */
	return 0;
}

static const struct mm_walk_ops pageattr_ops = {
	.pgd_entry = pageattr_pgd_entry,
	.p4d_entry = pageattr_p4d_entry,
	.pud_entry = pageattr_pud_entry,
	.pmd_entry = pageattr_pmd_entry,
	.pte_entry = pageattr_pte_entry,
	.pte_hole = pageattr_pte_hole,
};
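
/*
 * Apply set_mask/clear_mask to every mapping found in
 * [addr, addr + numpages * PAGE_SIZE), then flush the TLB for the range.
 */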
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
			pgprot_t clear_mask)
{
	int ret;
	unsigned long start = addr;
	unsigned long end = start + PAGE_SIZE * numpages;
	struct pageattr_masks masks = {
		.set_mask = set_mask,
		.clear_mask = clear_mask
	};

	if (!numpages)
		return 0;

	mmap_read_lock(&init_mm);
	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);
	mmap_read_unlock(&init_mm);

	flush_tlb_kernel_range(start, end);

	return ret;
}

int set_memory_ro(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
			    __pgprot(_PAGE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}
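
/*
 * A minimal usage sketch (hypothetical, not part of this file): patch a
 * page-aligned kernel buffer while it is writable, then seal it with the
 * helpers above.
 */
static int seal_buffer(unsigned long buf, int numpages)
{
	int ret;

	/* Drop write permission first, then drop execute permission. */
	ret = set_memory_ro(buf, numpages);
	if (ret)
		return ret;

	return set_memory_nx(buf, numpages);
}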

int set_direct_map_invalid_noflush(struct page *page)
{
	int ret;
	unsigned long start = (unsigned long)page_address(page);
	unsigned long end = start + PAGE_SIZE;
	struct pageattr_masks masks = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(_PAGE_PRESENT)
	};

	mmap_read_lock(&init_mm);
	ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
	mmap_read_unlock(&init_mm);

	return ret;
}

int set_direct_map_default_noflush(struct page *page)
{
	int ret;
	unsigned long start = (unsigned long)page_address(page);
	unsigned long end = start + PAGE_SIZE;
	struct pageattr_masks masks = {
		.set_mask = PAGE_KERNEL,
		.clear_mask = __pgprot(0)
	};

	mmap_read_lock(&init_mm);
	ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
	mmap_read_unlock(&init_mm);

	return ret;
}
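
/*
 * As the _noflush suffix says, the two helpers above do not invalidate
 * the TLB; flushing stale translations is left to the caller.
 */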

void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	if (enable)
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(_PAGE_PRESENT), __pgprot(0));
	else
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(0), __pgprot(_PAGE_PRESENT));
}
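
By v6.8 the same file has grown considerably. The pgd-level walker callback
is gone, READ_ONCE() on page-table entries is replaced by the
p4dp_get()/pudp_get()/pmdp_get()/ptep_get() accessors, the walk ops declare
walk_lock = PGWALK_RDLOCK while __set_memory() takes init_mm's mmap lock for
writing, the set_direct_map_*() helpers become thin wrappers around
__set_memory(), a kernel_page_present() implementation appears, and on
64-bit kernels __set_memory() first splits any huge linear-mapping alias of
the target range so that the permission change can be mirrored there with
page granularity.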
arch/riscv/mm/pageattr.c, v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/set_memory.h>

struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
	struct pageattr_masks *masks = walk->private;
	unsigned long new_val = val;

	new_val &= ~(pgprot_val(masks->clear_mask));
	new_val |= (pgprot_val(masks->set_mask));

	return new_val;
}

static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	p4d_t val = p4dp_get(p4d);

	if (p4d_leaf(val)) {
		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
		set_p4d(p4d, val);
	}

	return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pud_t val = pudp_get(pud);

	if (pud_leaf(val)) {
		val = __pud(set_pageattr_masks(pud_val(val), walk));
		set_pud(pud, val);
	}

	return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pmd_t val = pmdp_get(pmd);

	if (pmd_leaf(val)) {
		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
		set_pmd(pmd, val);
	}

	return 0;
}

static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t val = ptep_get(pte);

	val = __pte(set_pageattr_masks(pte_val(val), walk));
	set_pte(pte, val);

	return 0;
}

static int pageattr_pte_hole(unsigned long addr, unsigned long next,
			     int depth, struct mm_walk *walk)
{
	/* Nothing to do here */
	return 0;
}

static const struct mm_walk_ops pageattr_ops = {
	.p4d_entry = pageattr_p4d_entry,
	.pud_entry = pageattr_pud_entry,
	.pmd_entry = pageattr_pmd_entry,
	.pte_entry = pageattr_pte_entry,
	.pte_hole = pageattr_pte_hole,
	.walk_lock = PGWALK_RDLOCK,
};

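/*
 * On 64-bit kernels the linear mapping may use huge leaf entries. The
 * helpers below split such a leaf into the next smaller page-table level
 * so that attributes can be changed with PAGE_SIZE granularity without
 * affecting neighbouring mappings.
 */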
#ifdef CONFIG_64BIT
static int __split_linear_mapping_pmd(pud_t *pudp,
				      unsigned long vaddr, unsigned long end)
{
	pmd_t *pmdp;
	unsigned long next;

	pmdp = pmd_offset(pudp, vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

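		/*
		 * If [vaddr; end] contains [vaddr & PMD_MASK; next], we don't
		 * need to split; we'll change the protections on the whole PMD.
		 */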
		if (next - vaddr >= PMD_SIZE &&
		    vaddr <= (vaddr & PMD_MASK) && end >= next)
			continue;

		if (pmd_leaf(pmdp_get(pmdp))) {
			struct page *pte_page;
			unsigned long pfn = _pmd_pfn(pmdp_get(pmdp));
			pgprot_t prot = __pgprot(pmd_val(pmdp_get(pmdp)) & ~_PAGE_PFN_MASK);
			pte_t *ptep_new;
			int i;

			pte_page = alloc_page(GFP_KERNEL);
			if (!pte_page)
				return -ENOMEM;

			ptep_new = (pte_t *)page_address(pte_page);
			for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep_new)
				set_pte(ptep_new, pfn_pte(pfn + i, prot));

			smp_wmb();

			set_pmd(pmdp, pfn_pmd(page_to_pfn(pte_page), PAGE_TABLE));
		}
	} while (pmdp++, vaddr = next, vaddr != end);

	return 0;
}

static int __split_linear_mapping_pud(p4d_t *p4dp,
				      unsigned long vaddr, unsigned long end)
{
	pud_t *pudp;
	unsigned long next;
	int ret;

	pudp = pud_offset(p4dp, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

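		/*
		 * If [vaddr; end] contains [vaddr & PUD_MASK; next], we don't
		 * need to split; we'll change the protections on the whole PUD.
		 */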
		if (next - vaddr >= PUD_SIZE &&
		    vaddr <= (vaddr & PUD_MASK) && end >= next)
			continue;

		if (pud_leaf(pudp_get(pudp))) {
			struct page *pmd_page;
			unsigned long pfn = _pud_pfn(pudp_get(pudp));
			pgprot_t prot = __pgprot(pud_val(pudp_get(pudp)) & ~_PAGE_PFN_MASK);
			pmd_t *pmdp_new;
			int i;

			pmd_page = alloc_page(GFP_KERNEL);
			if (!pmd_page)
				return -ENOMEM;

			pmdp_new = (pmd_t *)page_address(pmd_page);
			for (i = 0; i < PTRS_PER_PMD; ++i, ++pmdp_new)
				set_pmd(pmdp_new,
					pfn_pmd(pfn + ((i * PMD_SIZE) >> PAGE_SHIFT), prot));

			smp_wmb();

			set_pud(pudp, pfn_pud(page_to_pfn(pmd_page), PAGE_TABLE));
		}

		ret = __split_linear_mapping_pmd(pudp, vaddr, next);
		if (ret)
			return ret;
	} while (pudp++, vaddr = next, vaddr != end);

	return 0;
}

static int __split_linear_mapping_p4d(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end)
{
	p4d_t *p4dp;
	unsigned long next;
	int ret;

	p4dp = p4d_offset(pgdp, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		/*
		 * If [vaddr; end] contains [vaddr & P4D_MASK; next], we don't
		 * need to split; we'll change the protections on the whole P4D.
		 */
		if (next - vaddr >= P4D_SIZE &&
		    vaddr <= (vaddr & P4D_MASK) && end >= next)
			continue;

		if (p4d_leaf(p4dp_get(p4dp))) {
			struct page *pud_page;
			unsigned long pfn = _p4d_pfn(p4dp_get(p4dp));
			pgprot_t prot = __pgprot(p4d_val(p4dp_get(p4dp)) & ~_PAGE_PFN_MASK);
			pud_t *pudp_new;
			int i;

			pud_page = alloc_page(GFP_KERNEL);
			if (!pud_page)
				return -ENOMEM;

			/*
			 * Fill the pud level with leaf puds that have the same
			 * protections as the leaf p4d.
			 */
			pudp_new = (pud_t *)page_address(pud_page);
			for (i = 0; i < PTRS_PER_PUD; ++i, ++pudp_new)
				set_pud(pudp_new,
					pfn_pud(pfn + ((i * PUD_SIZE) >> PAGE_SHIFT), prot));

			/*
			 * Make sure the pud filling is not reordered with the
			 * p4d store which could result in seeing a partially
			 * filled pud level.
			 */
			smp_wmb();

			set_p4d(p4dp, pfn_p4d(page_to_pfn(pud_page), PAGE_TABLE));
		}

		ret = __split_linear_mapping_pud(p4dp, vaddr, next);
		if (ret)
			return ret;
	} while (p4dp++, vaddr = next, vaddr != end);

	return 0;
}

static int __split_linear_mapping_pgd(pgd_t *pgdp,
				      unsigned long vaddr,
				      unsigned long end)
{
	unsigned long next;
	int ret;

	do {
		next = pgd_addr_end(vaddr, end);
		/* We never use PGD mappings for the linear mapping */
		ret = __split_linear_mapping_p4d(pgdp, vaddr, next);
		if (ret)
			return ret;
	} while (pgdp++, vaddr = next, vaddr != end);

	return 0;
}

static int split_linear_mapping(unsigned long start, unsigned long end)
{
	return __split_linear_mapping_pgd(pgd_offset_k(start), start, end);
}
#endif	/* CONFIG_64BIT */

static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
			pgprot_t clear_mask)
{
	int ret;
	unsigned long start = addr;
	unsigned long end = start + PAGE_SIZE * numpages;
	unsigned long __maybe_unused lm_start;
	unsigned long __maybe_unused lm_end;
	struct pageattr_masks masks = {
		.set_mask = set_mask,
		.clear_mask = clear_mask
	};

	if (!numpages)
		return 0;

	mmap_write_lock(&init_mm);

#ifdef CONFIG_64BIT
	/*
	 * We are about to change the permissions of a kernel mapping; we must
	 * apply the same changes to its linear mapping alias, which may imply
	 * splitting a huge mapping.
	 */

	if (is_vmalloc_or_module_addr((void *)start)) {
		struct vm_struct *area = NULL;
		int i, page_start;

		area = find_vm_area((void *)start);
		page_start = (start - (unsigned long)area->addr) >> PAGE_SHIFT;

		for (i = page_start; i < page_start + numpages; ++i) {
			lm_start = (unsigned long)page_address(area->pages[i]);
			lm_end = lm_start + PAGE_SIZE;

			ret = split_linear_mapping(lm_start, lm_end);
			if (ret)
				goto unlock;

			ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
						    &pageattr_ops, NULL, &masks);
			if (ret)
				goto unlock;
		}
	} else if (is_kernel_mapping(start) || is_linear_mapping(start)) {
		if (is_kernel_mapping(start)) {
			lm_start = (unsigned long)lm_alias(start);
			lm_end = (unsigned long)lm_alias(end);
		} else {
			lm_start = start;
			lm_end = end;
		}

		ret = split_linear_mapping(lm_start, lm_end);
		if (ret)
			goto unlock;

		ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
					    &pageattr_ops, NULL, &masks);
		if (ret)
			goto unlock;
	}

	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);

unlock:
	mmap_write_unlock(&init_mm);

	/*
	 * We can't use flush_tlb_kernel_range() here as we may have split a
	 * hugepage that is larger than that, so let's flush everything.
	 */
	flush_tlb_all();
#else
	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);

	mmap_write_unlock(&init_mm);

	flush_tlb_kernel_range(start, end);
#endif

	return ret;
}

int set_memory_rw_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(_PAGE_EXEC));
}

int set_memory_ro(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
			    __pgprot(_PAGE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}

int set_direct_map_invalid_noflush(struct page *page)
{
	return __set_memory((unsigned long)page_address(page), 1,
			    __pgprot(0), __pgprot(_PAGE_PRESENT));
}

int set_direct_map_default_noflush(struct page *page)
{
	return __set_memory((unsigned long)page_address(page), 1,
			    PAGE_KERNEL, __pgprot(_PAGE_EXEC));
}
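
/*
 * Note that set_direct_map_default_noflush() restores PAGE_KERNEL and also
 * clears _PAGE_EXEC explicitly, so the page comes back into the direct map
 * as non-executable data.
 */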

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	if (enable)
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(_PAGE_PRESENT), __pgprot(0));
	else
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(0), __pgprot(_PAGE_PRESENT));
}
#endif

bool kernel_page_present(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgd_t *pgd;
	pud_t *pud;
	p4d_t *p4d;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (!pgd_present(pgdp_get(pgd)))
		return false;
	if (pgd_leaf(pgdp_get(pgd)))
		return true;

	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(p4dp_get(p4d)))
		return false;
	if (p4d_leaf(p4dp_get(p4d)))
		return true;

	pud = pud_offset(p4d, addr);
	if (!pud_present(pudp_get(pud)))
		return false;
	if (pud_leaf(pudp_get(pud)))
		return true;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(pmdp_get(pmd)))
		return false;
	if (pmd_leaf(pmdp_get(pmd)))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	return pte_present(ptep_get(pte));
}
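
A closing sketch (the function below is hypothetical, not part of this
file): how the v6.8 direct-map helpers and kernel_page_present() fit
together.

/* Hypothetical: temporarily unmap one page from the direct map. */
static void toggle_direct_map(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);

	/*
	 * The _noflush API makes no promise about TLB invalidation, so
	 * flush the range explicitly after each change.
	 */
	set_direct_map_invalid_noflush(page);	/* clears _PAGE_PRESENT */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	WARN_ON(kernel_page_present(page));

	set_direct_map_default_noflush(page);	/* restores PAGE_KERNEL */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}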
439}