Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
4 */
5#include <linux/mm.h>
6#include <linux/module.h>
7
8#include <asm/pgtable.h>
9#include <asm/tlbflush.h>
10#include <asm/set_memory.h>
11
/*
 * Masks handed to the change_page_range() callback via its opaque data
 * pointer: bits in set_mask are OR'd into each visited PTE, bits in
 * clear_mask are removed from it.
 */
struct page_change_data {
	pgprot_t set_mask;	/* PTE bits to set */
	pgprot_t clear_mask;	/* PTE bits to clear */
};
16
17static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
18{
19 struct page_change_data *cdata = data;
20 pte_t pte = *ptep;
21
22 pte = clear_pte_bit(pte, cdata->clear_mask);
23 pte = set_pte_bit(pte, cdata->set_mask);
24
25 set_pte_ext(ptep, pte, 0);
26 return 0;
27}
28
/*
 * Return true iff [start, start + size) lies entirely within
 * [range_start, range_end). Written to avoid overflow: the size bound is
 * compared against the remaining room rather than computing start + size.
 */
static bool in_range(unsigned long start, unsigned long size,
		     unsigned long range_start, unsigned long range_end)
{
	if (start < range_start || start >= range_end)
		return false;

	return size <= range_end - start;
}
35
36static int change_memory_common(unsigned long addr, int numpages,
37 pgprot_t set_mask, pgprot_t clear_mask)
38{
39 unsigned long start = addr & PAGE_MASK;
40 unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
41 unsigned long size = end - start;
42 int ret;
43 struct page_change_data data;
44
45 WARN_ON_ONCE(start != addr);
46
47 if (!size)
48 return 0;
49
50 if (!in_range(start, size, MODULES_VADDR, MODULES_END) &&
51 !in_range(start, size, VMALLOC_START, VMALLOC_END))
52 return -EINVAL;
53
54 data.set_mask = set_mask;
55 data.clear_mask = clear_mask;
56
57 ret = apply_to_page_range(&init_mm, start, size, change_page_range,
58 &data);
59
60 flush_tlb_kernel_range(start, end);
61 return ret;
62}
63
64int set_memory_ro(unsigned long addr, int numpages)
65{
66 return change_memory_common(addr, numpages,
67 __pgprot(L_PTE_RDONLY),
68 __pgprot(0));
69}
70
71int set_memory_rw(unsigned long addr, int numpages)
72{
73 return change_memory_common(addr, numpages,
74 __pgprot(0),
75 __pgprot(L_PTE_RDONLY));
76}
77
78int set_memory_nx(unsigned long addr, int numpages)
79{
80 return change_memory_common(addr, numpages,
81 __pgprot(L_PTE_XN),
82 __pgprot(0));
83}
84
85int set_memory_x(unsigned long addr, int numpages)
86{
87 return change_memory_common(addr, numpages,
88 __pgprot(0),
89 __pgprot(L_PTE_XN));
90}
1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13#include <linux/mm.h>
14#include <linux/module.h>
15
16#include <asm/pgtable.h>
17#include <asm/tlbflush.h>
18
/*
 * Masks handed to the change_page_range() callback via its opaque data
 * pointer: bits in set_mask are OR'd into each visited PTE, bits in
 * clear_mask are removed from it.
 */
struct page_change_data {
	pgprot_t set_mask;	/* PTE bits to set */
	pgprot_t clear_mask;	/* PTE bits to clear */
};
23
24static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
25 void *data)
26{
27 struct page_change_data *cdata = data;
28 pte_t pte = *ptep;
29
30 pte = clear_pte_bit(pte, cdata->clear_mask);
31 pte = set_pte_bit(pte, cdata->set_mask);
32
33 set_pte_ext(ptep, pte, 0);
34 return 0;
35}
36
37static int change_memory_common(unsigned long addr, int numpages,
38 pgprot_t set_mask, pgprot_t clear_mask)
39{
40 unsigned long start = addr;
41 unsigned long size = PAGE_SIZE*numpages;
42 unsigned long end = start + size;
43 int ret;
44 struct page_change_data data;
45
46 if (!IS_ALIGNED(addr, PAGE_SIZE)) {
47 start &= PAGE_MASK;
48 end = start + size;
49 WARN_ON_ONCE(1);
50 }
51
52 if (!numpages)
53 return 0;
54
55 if (start < MODULES_VADDR || start >= MODULES_END)
56 return -EINVAL;
57
58 if (end < MODULES_VADDR || start >= MODULES_END)
59 return -EINVAL;
60
61 data.set_mask = set_mask;
62 data.clear_mask = clear_mask;
63
64 ret = apply_to_page_range(&init_mm, start, size, change_page_range,
65 &data);
66
67 flush_tlb_kernel_range(start, end);
68 return ret;
69}
70
71int set_memory_ro(unsigned long addr, int numpages)
72{
73 return change_memory_common(addr, numpages,
74 __pgprot(L_PTE_RDONLY),
75 __pgprot(0));
76}
77
78int set_memory_rw(unsigned long addr, int numpages)
79{
80 return change_memory_common(addr, numpages,
81 __pgprot(0),
82 __pgprot(L_PTE_RDONLY));
83}
84
85int set_memory_nx(unsigned long addr, int numpages)
86{
87 return change_memory_common(addr, numpages,
88 __pgprot(L_PTE_XN),
89 __pgprot(0));
90}
91
92int set_memory_x(unsigned long addr, int numpages)
93{
94 return change_memory_common(addr, numpages,
95 __pgprot(0),
96 __pgprot(L_PTE_XN));
97}