// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/mm.h>
#include <linux/module.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/set_memory.h>

struct page_change_data {
        pgprot_t set_mask;
        pgprot_t clear_mask;
};

static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
        struct page_change_data *cdata = data;
        pte_t pte = *ptep;

        pte = clear_pte_bit(pte, cdata->clear_mask);
        pte = set_pte_bit(pte, cdata->set_mask);

        set_pte_ext(ptep, pte, 0);
        return 0;
}

static bool in_range(unsigned long start, unsigned long size,
                     unsigned long range_start, unsigned long range_end)
{
        return start >= range_start && start < range_end &&
                size <= range_end - start;
}

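/*
 * Descriptive note (added): attribute changes are restricted to the module
 * and vmalloc areas, which are mapped with PAGE_SIZE pages, so walking the
 * range PTE by PTE with apply_to_page_range() is sufficient.
 */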
static int change_memory_common(unsigned long addr, int numpages,
                                pgprot_t set_mask, pgprot_t clear_mask)
{
        unsigned long start = addr & PAGE_MASK;
        unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
        unsigned long size = end - start;
        int ret;
        struct page_change_data data;

        WARN_ON_ONCE(start != addr);

        if (!size)
                return 0;

        if (!in_range(start, size, MODULES_VADDR, MODULES_END) &&
            !in_range(start, size, VMALLOC_START, VMALLOC_END))
                return -EINVAL;

        data.set_mask = set_mask;
        data.clear_mask = clear_mask;

        ret = apply_to_page_range(&init_mm, start, size, change_page_range,
                                  &data);

        flush_tlb_kernel_range(start, end);
        return ret;
}

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                    __pgprot(L_PTE_RDONLY),
                                    __pgprot(0));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                    __pgprot(0),
                                    __pgprot(L_PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                    __pgprot(L_PTE_XN),
                                    __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                    __pgprot(0),
                                    __pgprot(L_PTE_XN));
}
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/mm.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/set_memory.h>

struct page_change_data {
        pgprot_t set_mask;
        pgprot_t clear_mask;
};

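/*
 * Callback for apply_to_page_range(): apply the requested clear/set masks
 * to a single PTE.
 */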
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
        struct page_change_data *cdata = data;
        pte_t pte = *ptep;

        pte = clear_pte_bit(pte, cdata->clear_mask);
        pte = set_pte_bit(pte, cdata->set_mask);

        set_pte_ext(ptep, pte, 0);
        return 0;
}

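/*
 * True if [start, start + size) lies entirely within
 * [range_start, range_end).
 */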
static bool in_range(unsigned long start, unsigned long size,
                     unsigned long range_start, unsigned long range_end)
{
        return start >= range_start && start < range_end &&
                size <= range_end - start;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
                                  pgprot_t set_mask, pgprot_t clear_mask)
{
        struct page_change_data data;
        int ret;

        data.set_mask = set_mask;
        data.clear_mask = clear_mask;

        ret = apply_to_page_range(&init_mm, start, size, change_page_range,
                                  &data);

        flush_tlb_kernel_range(start, start + size);
        return ret;
}

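/*
 * Descriptive note (added): check alignment and restrict the change to the
 * module and vmalloc areas, then hand the page-aligned range to
 * __change_memory_common().
 */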
static int change_memory_common(unsigned long addr, int numpages,
                                pgprot_t set_mask, pgprot_t clear_mask)
{
        unsigned long start = addr & PAGE_MASK;
        unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
        unsigned long size = end - start;

        WARN_ON_ONCE(start != addr);

        if (!size)
                return 0;

        if (!in_range(start, size, MODULES_VADDR, MODULES_END) &&
            !in_range(start, size, VMALLOC_START, VMALLOC_END))
                return -EINVAL;

        return __change_memory_common(start, size, set_mask, clear_mask);
}

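/* Set or clear individual permission bits on a range of kernel pages. */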
int set_memory_ro(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                    __pgprot(L_PTE_RDONLY),
                                    __pgprot(0));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                    __pgprot(0),
                                    __pgprot(L_PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                    __pgprot(L_PTE_XN),
                                    __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                    __pgprot(0),
                                    __pgprot(L_PTE_XN));
}

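/*
 * Descriptive note (added): toggle the valid bit on a range of pages so it
 * can be unmapped and remapped without changing any other attributes.
 */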
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
        if (enable)
                return __change_memory_common(addr, PAGE_SIZE * numpages,
                                              __pgprot(L_PTE_VALID),
                                              __pgprot(0));
        else
                return __change_memory_common(addr, PAGE_SIZE * numpages,
                                              __pgprot(0),
                                              __pgprot(L_PTE_VALID));
}
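
/*
 * Illustrative usage sketch (added, not part of this file): a hypothetical
 * caller could write-protect a page obtained from vmalloc() with the helpers
 * above and make it writable again before freeing it:
 *
 *        void *buf = vmalloc(PAGE_SIZE);
 *
 *        set_memory_ro((unsigned long)buf, 1);
 *        ... read-only phase ...
 *        set_memory_rw((unsigned long)buf, 1);
 *        vfree(buf);
 */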