// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 * Written by: Lennert Buytenhek and Nicolas Pitre
 * Copyright (C) 2009 Marvell Semiconductor
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>

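/*
 * Walk the page tables for a user address and, if the destination page
 * is mapped, present and already writable/dirty, take the relevant page
 * table lock so that a subsequent write can neither fault nor race with
 * a layout change.  Returns 1 with *ptlp held on success; *ptep is the
 * mapped pte, or NULL when the mapping is a huge (pmd-level) page.
 * Returns 0 if the caller must first fault the page in.
 */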
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	p4d_t *p4d;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	p4d = p4d_offset(pgd, addr);
	if (unlikely(p4d_none(*p4d) || p4d_bad(*p4d)))
		return 0;

	pud = pud_offset(p4d, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd)))
		return 0;

	/*
	 * A pmd can be bad if it refers to a HugeTLB or THP page.
	 *
	 * Both THP and HugeTLB pages have the same pmd layout
	 * and should not be manipulated by the pte functions.
	 *
	 * Lock the page table for the destination and check
	 * to see that it's still huge and whether or not we will
	 * need to fault on write.
	 */
	if (unlikely(pmd_leaf(*pmd))) {
		ptl = &current->mm->page_table_lock;
		spin_lock(ptl);
		if (unlikely(!pmd_leaf(*pmd)
			|| pmd_hugewillfault(*pmd))) {
			spin_unlock(ptl);
			return 0;
		}

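		/*
		 * A NULL pte tells the caller this is a huge mapping:
		 * there is no pte to unmap, only the lock to drop.
		 */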
		*ptep = NULL;
		*ptlp = ptl;
		return 1;
	}

	if (unlikely(pmd_bad(*pmd)))
		return 0;

	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte))
		return 0;

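	/*
	 * The write must not fault: the pte has to be present, writable
	 * and already dirty.  ARM maintains the young/dirty bits in
	 * software, so a pte that is not young or not dirty would still
	 * trigger a fault on access or write respectively.
	 */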
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}

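/*
 * Copy to user space by pinning the destination page so that a plain,
 * optimized __memcpy() can be used instead of the fixup-laden
 * __copy_to_user_std().  Pages that cannot be pinned are faulted in
 * with __put_user() and the copy is retried.
 */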
static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	unsigned long ua_flags;
	int atomic;

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = faulthandler_disabled();

	if (!atomic)
		mmap_read_lock(current->mm);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

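		/*
		 * If the page cannot be pinned (not present, not yet
		 * writable, ...), drop the lock and write one byte with
		 * __put_user() to fault it in and mark it dirty, then
		 * retry.  A failure here is a genuine fault.
		 */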
		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				mmap_read_unlock(current->mm);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				mmap_read_lock(current->mm);
		}

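		/*
		 * Never cross a page boundary in one step: the pin only
		 * covers the current page.  (~to & ~PAGE_MASK) + 1 is the
		 * number of bytes up to the next boundary, e.g. with
		 * 4 KiB pages, to == 0x1ff8 gives tocopy == 8.
		 */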
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		__memcpy((void *)to, from, tocopy);
		uaccess_restore(ua_flags);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	if (!atomic)
		mmap_read_unlock(current->mm);

out:
	return n;
}

unsigned long
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low by avoiding a large
	 * register dump on the stack just to reload them right away.
	 * With frame pointer disabled, tail call optimization kicks in
	 * as well making this test almost invisible.
	 */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __copy_to_user_std(to, from, n);
		uaccess_restore(ua_flags);
	} else {
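		/*
		 * uaccess_mask_range_ptr() sanitizes the user pointer
		 * against the user address range before the memcpy-based
		 * path dereferences it.
		 */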
		n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
					  from, n);
	}
	return n;
}

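/*
 * Same approach as __copy_to_user_memcpy() above, but zeroing the
 * destination with __memset() instead of copying.  This path is not
 * used from atomic context, so the mmap lock is taken unconditionally.
 */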
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	unsigned long ua_flags;

	mmap_read_lock(current->mm);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

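		/* Fault the page in with __put_user(), as in the copy path. */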
		while (!pin_page_for_write(addr, &pte, &ptl)) {
			mmap_read_unlock(current->mm);
			if (__put_user(0, (char __user *)addr))
				goto out;
			mmap_read_lock(current->mm);
		}

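		/* Zero at most up to the end of the current page. */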
		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		__memset((void *)addr, 0, tocopy);
		uaccess_restore(ua_flags);
		addr += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	mmap_read_unlock(current->mm);

out:
	return n;
}

unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
	/* See rationale for this in arm_copy_to_user() above. */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __clear_user_std(addr, n);
		uaccess_restore(ua_flags);
	} else {
		n = __clear_user_memset(addr, n);
	}
	return n;
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated. Some overhead (small but still)
 * would be implied by a runtime determined variable threshold, and
 * so far the measurement on concerned targets didn't show a worthwhile
 * variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for results to make some sense.
 */

#include <linux/vmalloc.h>

static int __init test_size_threshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;

	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;
	kernel_ptr = page_address(src_page);
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__PAGE_COPY));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}

subsys_initcall(test_size_threshold);

#endif