v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  4 */
  5
  6#include <linux/err.h>
  7#include <linux/highmem.h>
  8#include <linux/mm.h>
  9#include <linux/module.h>
 10#include <linux/sched.h>
 11#include <asm/current.h>
 12#include <asm/page.h>
 13#include <kern_util.h>
 14#include <asm/futex.h>
 15#include <os.h>
 16
 17pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
 18{
 19	pgd_t *pgd;
 20	p4d_t *p4d;
 21	pud_t *pud;
 22	pmd_t *pmd;
 23
 24	if (mm == NULL)
 25		return NULL;
 26
 27	pgd = pgd_offset(mm, addr);
 28	if (!pgd_present(*pgd))
 29		return NULL;
 30
 31	p4d = p4d_offset(pgd, addr);
 32	if (!p4d_present(*p4d))
 33		return NULL;
 34
 35	pud = pud_offset(p4d, addr);
 36	if (!pud_present(*pud))
 37		return NULL;
 38
 39	pmd = pmd_offset(pud, addr);
 40	if (!pmd_present(*pmd))
 41		return NULL;
 42
 43	return pte_offset_kernel(pmd, addr);
 44}
 45
 46static pte_t *maybe_map(unsigned long virt, int is_write)
 47{
 48	pte_t *pte = virt_to_pte(current->mm, virt);
 49	int err, dummy_code;
 50
 51	if ((pte == NULL) || !pte_present(*pte) ||
 52	    (is_write && !pte_write(*pte))) {
 53		err = handle_page_fault(virt, 0, is_write, 1, &dummy_code);
 54		if (err)
 55			return NULL;
 56		pte = virt_to_pte(current->mm, virt);
 57	}
 58	if (!pte_present(*pte))
 59		pte = NULL;
 60
 61	return pte;
 62}
 63
 64static int do_op_one_page(unsigned long addr, int len, int is_write,
 65		 int (*op)(unsigned long addr, int len, void *arg), void *arg)
 66{
 67	struct page *page;
 68	pte_t *pte;
 69	int n;
 70
 71	pte = maybe_map(addr, is_write);
 72	if (pte == NULL)
 73		return -1;
 74
 75	page = pte_page(*pte);
 76#ifdef CONFIG_64BIT
 77	pagefault_disable();
 78	addr = (unsigned long) page_address(page) +
 79		(addr & ~PAGE_MASK);
 80#else
 81	addr = (unsigned long) kmap_atomic(page) +
 82		(addr & ~PAGE_MASK);
 83#endif
 84	n = (*op)(addr, len, arg);
 85
 86#ifdef CONFIG_64BIT
 87	pagefault_enable();
 88#else
 89	kunmap_atomic((void *)addr);
 90#endif
 91
 92	return n;
 93}
 94
 95static long buffer_op(unsigned long addr, int len, int is_write,
 96		      int (*op)(unsigned long, int, void *), void *arg)
 97{
 98	long size, remain, n;
 99
100	size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len);
101	remain = len;
102
103	n = do_op_one_page(addr, size, is_write, op, arg);
104	if (n != 0) {
105		remain = (n < 0 ? remain : 0);
106		goto out;
107	}
108
109	addr += size;
110	remain -= size;
111	if (remain == 0)
112		goto out;
113
114	while (addr < ((addr + remain) & PAGE_MASK)) {
115		n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
116		if (n != 0) {
117			remain = (n < 0 ? remain : 0);
118			goto out;
119		}
120
121		addr += PAGE_SIZE;
122		remain -= PAGE_SIZE;
123	}
124	if (remain == 0)
125		goto out;
126
127	n = do_op_one_page(addr, remain, is_write, op, arg);
128	if (n != 0) {
129		remain = (n < 0 ? remain : 0);
130		goto out;
131	}
132
133	return 0;
134 out:
135	return remain;
136}
137
138static int copy_chunk_from_user(unsigned long from, int len, void *arg)
139{
140	unsigned long *to_ptr = arg, to = *to_ptr;
141
142	memcpy((void *) to, (void *) from, len);
143	*to_ptr += len;
144	return 0;
145}
146
147unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
148{
149	return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to);
150}
151EXPORT_SYMBOL(raw_copy_from_user);
152
153static int copy_chunk_to_user(unsigned long to, int len, void *arg)
154{
155	unsigned long *from_ptr = arg, from = *from_ptr;
156
157	memcpy((void *) to, (void *) from, len);
158	*from_ptr += len;
159	return 0;
160}
161
162unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
163{
164	return buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from);
165}
166EXPORT_SYMBOL(raw_copy_to_user);
167
168static int strncpy_chunk_from_user(unsigned long from, int len, void *arg)
169{
170	char **to_ptr = arg, *to = *to_ptr;
171	int n;
172
173	strncpy(to, (void *) from, len);
174	n = strnlen(to, len);
175	*to_ptr += n;
176
177	if (n < len)
178	        return 1;
179	return 0;
180}
181
182long strncpy_from_user(char *dst, const char __user *src, long count)
183{
184	long n;
185	char *ptr = dst;
186
187	if (!access_ok(src, 1))
188		return -EFAULT;
189	n = buffer_op((unsigned long) src, count, 0, strncpy_chunk_from_user,
190		      &ptr);
191	if (n != 0)
192		return -EFAULT;
193	return strnlen(dst, count);
194}
195EXPORT_SYMBOL(strncpy_from_user);
196
197static int clear_chunk(unsigned long addr, int len, void *unused)
198{
199	memset((void *) addr, 0, len);
200	return 0;
201}
202
203unsigned long __clear_user(void __user *mem, unsigned long len)
204{
205	return buffer_op((unsigned long) mem, len, 1, clear_chunk, NULL);
206}
207EXPORT_SYMBOL(__clear_user);
208
209static int strnlen_chunk(unsigned long str, int len, void *arg)
210{
211	int *len_ptr = arg, n;
212
213	n = strnlen((void *) str, len);
214	*len_ptr += n;
215
216	if (n < len)
217		return 1;
218	return 0;
219}
220
221long strnlen_user(const char __user *str, long len)
222{
223	int count = 0, n;
224
225	if (!access_ok(str, 1))
226		return -EFAULT;
227	n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count);
228	if (n == 0)
229		return count + 1;
230	return 0;
231}
232EXPORT_SYMBOL(strnlen_user);
233
234/**
235 * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
236 *			  argument and comparison of the previous
237 *			  futex value with another constant.
238 *
239 * @op:		operation to execute
240 * @oparg:	argument to operation
241 * @oval:	old value at uaddr
242 * @uaddr:	pointer to user space address
243 *
244 * Return:
245 * 0 - On success
246 * -EFAULT - User access resulted in a page fault
247 * -EAGAIN - Atomic operation was unable to complete due to contention
248 * -ENOSYS - Operation not supported
249 */
250
251int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
252{
253	int oldval, ret;
254	struct page *page;
255	unsigned long addr = (unsigned long) uaddr;
256	pte_t *pte;
257
258	ret = -EFAULT;
259	if (!access_ok(uaddr, sizeof(*uaddr)))
260		return -EFAULT;
261	preempt_disable();
262	pte = maybe_map(addr, 1);
263	if (pte == NULL)
264		goto out_inuser;
265
266	page = pte_page(*pte);
267#ifdef CONFIG_64BIT
268	pagefault_disable();
269	addr = (unsigned long) page_address(page) +
270			(((unsigned long) addr) & ~PAGE_MASK);
271#else
272	addr = (unsigned long) kmap_atomic(page) +
273		((unsigned long) addr & ~PAGE_MASK);
274#endif
275	uaddr = (u32 *) addr;
276	oldval = *uaddr;
277
278	ret = 0;
279
280	switch (op) {
281	case FUTEX_OP_SET:
282		*uaddr = oparg;
283		break;
284	case FUTEX_OP_ADD:
285		*uaddr += oparg;
286		break;
287	case FUTEX_OP_OR:
288		*uaddr |= oparg;
289		break;
290	case FUTEX_OP_ANDN:
291		*uaddr &= ~oparg;
292		break;
293	case FUTEX_OP_XOR:
294		*uaddr ^= oparg;
295		break;
296	default:
297		ret = -ENOSYS;
298	}
299#ifdef CONFIG_64BIT
300	pagefault_enable();
301#else
302	kunmap_atomic((void *)addr);
303#endif
304
305out_inuser:
306	preempt_enable();
307
308	if (ret == 0)
309		*oval = oldval;
310
311	return ret;
312}
313EXPORT_SYMBOL(arch_futex_atomic_op_inuser);
314
315/**
316 * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
317 *				uaddr with newval if the current value is
318 *				oldval.
319 * @uval:	pointer to store content of @uaddr
320 * @uaddr:	pointer to user space address
321 * @oldval:	old value
322 * @newval:	new value to store to @uaddr
323 *
324 * Return:
325 * 0 - On success
326 * -EFAULT - User access resulted in a page fault
327 * -EAGAIN - Atomic operation was unable to complete due to contention
328 */
329
330int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
331			      u32 oldval, u32 newval)
332{
333	struct page *page;
334	pte_t *pte;
335	int ret = -EFAULT;
336
337	if (!access_ok(uaddr, sizeof(*uaddr)))
338		return -EFAULT;
339
340	preempt_disable();
341	pte = maybe_map((unsigned long) uaddr, 1);
342	if (pte == NULL)
343		goto out_inatomic;
344
345	page = pte_page(*pte);
346#ifdef CONFIG_64BIT
347	pagefault_disable();
348	uaddr = page_address(page) + (((unsigned long) uaddr) & ~PAGE_MASK);
349#else
350	uaddr = kmap_atomic(page) + ((unsigned long) uaddr & ~PAGE_MASK);
351#endif
352
353	*uval = *uaddr;
354
355	ret = cmpxchg(uaddr, oldval, newval);
356
357#ifdef CONFIG_64BIT
358	pagefault_enable();
359#else
360	kunmap_atomic(uaddr);
361#endif
362	ret = 0;
363
364out_inatomic:
365	preempt_enable();
366	return ret;
367}
368EXPORT_SYMBOL(futex_atomic_cmpxchg_inatomic);
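
On UML, the exported raw_copy_from_user()/raw_copy_to_user() above sit behind the generic copy_from_user()/copy_to_user() wrappers: buffer_op() walks the user buffer one page at a time, maybe_map() faults each page in through handle_page_fault(), and the per-chunk callback does a plain memcpy(). A minimal sketch of how ordinary kernel code reaches this path follows; the helper name and the 64-byte limit are illustrative only, not part of this file.

#include <linux/uaccess.h>
#include <linux/errno.h>

/*
 * Illustrative helper (not from this file): copy a small user buffer into
 * a kernel buffer.  copy_from_user() returns the number of bytes that
 * could not be copied; on UML that is the "remain" value computed by
 * buffer_op() when a page cannot be faulted in.
 */
static int example_copy_in(char *dst, const char __user *src, size_t len)
{
	if (len > 64)
		return -EINVAL;

	if (copy_from_user(dst, src, len))
		return -EFAULT;

	return 0;
}
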
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  4 */
  5
  6#include <linux/err.h>
  7#include <linux/highmem.h>
  8#include <linux/mm.h>
  9#include <linux/module.h>
 10#include <linux/sched.h>
 11#include <asm/current.h>
 12#include <asm/page.h>
 13#include <kern_util.h>
 14#include <asm/futex.h>
 15#include <os.h>
 16
 17pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
 18{
 19	pgd_t *pgd;
 20	p4d_t *p4d;
 21	pud_t *pud;
 22	pmd_t *pmd;
 23
 24	if (mm == NULL)
 25		return NULL;
 26
 27	pgd = pgd_offset(mm, addr);
 28	if (!pgd_present(*pgd))
 29		return NULL;
 30
 31	p4d = p4d_offset(pgd, addr);
 32	if (!p4d_present(*p4d))
 33		return NULL;
 34
 35	pud = pud_offset(p4d, addr);
 36	if (!pud_present(*pud))
 37		return NULL;
 38
 39	pmd = pmd_offset(pud, addr);
 40	if (!pmd_present(*pmd))
 41		return NULL;
 42
 43	return pte_offset_kernel(pmd, addr);
 44}
 45
 46static pte_t *maybe_map(unsigned long virt, int is_write)
 47{
 48	pte_t *pte = virt_to_pte(current->mm, virt);
 49	int err, dummy_code;
 50
 51	if ((pte == NULL) || !pte_present(*pte) ||
 52	    (is_write && !pte_write(*pte))) {
 53		err = handle_page_fault(virt, 0, is_write, 1, &dummy_code);
 54		if (err)
 55			return NULL;
 56		pte = virt_to_pte(current->mm, virt);
 57	}
 58	if (!pte_present(*pte))
 59		pte = NULL;
 60
 61	return pte;
 62}
 63
 64static int do_op_one_page(unsigned long addr, int len, int is_write,
 65		 int (*op)(unsigned long addr, int len, void *arg), void *arg)
 66{
 67	struct page *page;
 68	pte_t *pte;
 69	int n;
 70
 71	pte = maybe_map(addr, is_write);
 72	if (pte == NULL)
 73		return -1;
 74
 75	page = pte_page(*pte);
 76#ifdef CONFIG_64BIT
 77	pagefault_disable();
 78	addr = (unsigned long) page_address(page) +
 79		(addr & ~PAGE_MASK);
 80#else
 81	addr = (unsigned long) kmap_atomic(page) +
 82		(addr & ~PAGE_MASK);
 83#endif
 84	n = (*op)(addr, len, arg);
 85
 86#ifdef CONFIG_64BIT
 87	pagefault_enable();
 88#else
 89	kunmap_atomic((void *)addr);
 90#endif
 91
 92	return n;
 93}
 94
 95static long buffer_op(unsigned long addr, int len, int is_write,
 96		      int (*op)(unsigned long, int, void *), void *arg)
 97{
 98	long size, remain, n;
 99
100	size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len);
101	remain = len;
102
103	n = do_op_one_page(addr, size, is_write, op, arg);
104	if (n != 0) {
105		remain = (n < 0 ? remain : 0);
106		goto out;
107	}
108
109	addr += size;
110	remain -= size;
111	if (remain == 0)
112		goto out;
113
114	while (addr < ((addr + remain) & PAGE_MASK)) {
115		n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
116		if (n != 0) {
117			remain = (n < 0 ? remain : 0);
118			goto out;
119		}
120
121		addr += PAGE_SIZE;
122		remain -= PAGE_SIZE;
123	}
124	if (remain == 0)
125		goto out;
126
127	n = do_op_one_page(addr, remain, is_write, op, arg);
128	if (n != 0) {
129		remain = (n < 0 ? remain : 0);
130		goto out;
131	}
132
133	return 0;
134 out:
135	return remain;
136}
137
138static int copy_chunk_from_user(unsigned long from, int len, void *arg)
139{
140	unsigned long *to_ptr = arg, to = *to_ptr;
141
142	memcpy((void *) to, (void *) from, len);
143	*to_ptr += len;
144	return 0;
145}
146
147unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
148{
149	if (uaccess_kernel()) {
150		memcpy(to, (__force void*)from, n);
151		return 0;
152	}
153
154	return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to);
155}
156EXPORT_SYMBOL(raw_copy_from_user);
157
158static int copy_chunk_to_user(unsigned long to, int len, void *arg)
159{
160	unsigned long *from_ptr = arg, from = *from_ptr;
161
162	memcpy((void *) to, (void *) from, len);
163	*from_ptr += len;
164	return 0;
165}
166
167unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
168{
169	if (uaccess_kernel()) {
170		memcpy((__force void *) to, from, n);
171		return 0;
172	}
173
174	return buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from);
175}
176EXPORT_SYMBOL(raw_copy_to_user);
177
178static int strncpy_chunk_from_user(unsigned long from, int len, void *arg)
179{
180	char **to_ptr = arg, *to = *to_ptr;
181	int n;
182
183	strncpy(to, (void *) from, len);
184	n = strnlen(to, len);
185	*to_ptr += n;
186
187	if (n < len)
188	        return 1;
189	return 0;
190}
191
192long __strncpy_from_user(char *dst, const char __user *src, long count)
193{
194	long n;
195	char *ptr = dst;
196
197	if (uaccess_kernel()) {
198		strncpy(dst, (__force void *) src, count);
199		return strnlen(dst, count);
200	}
201
202	n = buffer_op((unsigned long) src, count, 0, strncpy_chunk_from_user,
203		      &ptr);
204	if (n != 0)
205		return -EFAULT;
206	return strnlen(dst, count);
207}
208EXPORT_SYMBOL(__strncpy_from_user);
209
210static int clear_chunk(unsigned long addr, int len, void *unused)
211{
212	memset((void *) addr, 0, len);
213	return 0;
214}
215
216unsigned long __clear_user(void __user *mem, unsigned long len)
217{
218	if (uaccess_kernel()) {
219		memset((__force void*)mem, 0, len);
220		return 0;
221	}
222
223	return buffer_op((unsigned long) mem, len, 1, clear_chunk, NULL);
224}
225EXPORT_SYMBOL(__clear_user);
226
227static int strnlen_chunk(unsigned long str, int len, void *arg)
228{
229	int *len_ptr = arg, n;
230
231	n = strnlen((void *) str, len);
232	*len_ptr += n;
233
234	if (n < len)
235		return 1;
236	return 0;
237}
238
239long __strnlen_user(const void __user *str, long len)
240{
241	int count = 0, n;
242
243	if (uaccess_kernel())
244		return strnlen((__force char*)str, len) + 1;
245
246	n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count);
247	if (n == 0)
248		return count + 1;
249	return 0;
250}
251EXPORT_SYMBOL(__strnlen_user);
252
253/**
254 * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
255 *			  argument and comparison of the previous
256 *			  futex value with another constant.
257 *
258 * @encoded_op:	encoded operation to execute
259 * @uaddr:	pointer to user space address
260 *
261 * Return:
262 * 0 - On success
263 * -EFAULT - User access resulted in a page fault
264 * -EAGAIN - Atomic operation was unable to complete due to contention
265 * -ENOSYS - Operation not supported
266 */
267
268int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
269{
270	int oldval, ret;
271	struct page *page;
272	unsigned long addr = (unsigned long) uaddr;
273	pte_t *pte;
274
275	ret = -EFAULT;
276	if (!access_ok(uaddr, sizeof(*uaddr)))
277		return -EFAULT;
278	preempt_disable();
279	pte = maybe_map(addr, 1);
280	if (pte == NULL)
281		goto out_inuser;
282
283	page = pte_page(*pte);
284#ifdef CONFIG_64BIT
285	pagefault_disable();
286	addr = (unsigned long) page_address(page) +
287			(((unsigned long) addr) & ~PAGE_MASK);
288#else
289	addr = (unsigned long) kmap_atomic(page) +
290		((unsigned long) addr & ~PAGE_MASK);
291#endif
292	uaddr = (u32 *) addr;
293	oldval = *uaddr;
294
295	ret = 0;
296
297	switch (op) {
298	case FUTEX_OP_SET:
299		*uaddr = oparg;
300		break;
301	case FUTEX_OP_ADD:
302		*uaddr += oparg;
303		break;
304	case FUTEX_OP_OR:
305		*uaddr |= oparg;
306		break;
307	case FUTEX_OP_ANDN:
308		*uaddr &= ~oparg;
309		break;
310	case FUTEX_OP_XOR:
311		*uaddr ^= oparg;
312		break;
313	default:
314		ret = -ENOSYS;
315	}
316#ifdef CONFIG_64BIT
317	pagefault_enable();
318#else
319	kunmap_atomic((void *)addr);
320#endif
321
322out_inuser:
323	preempt_enable();
324
325	if (ret == 0)
326		*oval = oldval;
327
328	return ret;
329}
330EXPORT_SYMBOL(arch_futex_atomic_op_inuser);
331
332/**
333 * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
334 *				uaddr with newval if the current value is
335 *				oldval.
336 * @uval:	pointer to store content of @uaddr
337 * @uaddr:	pointer to user space address
338 * @oldval:	old value
339 * @newval:	new value to store to @uaddr
340 *
341 * Return:
342 * 0 - On success
343 * -EFAULT - User access resulted in a page fault
344 * -EAGAIN - Atomic operation was unable to complete due to contention
345 * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
346 */
347
348int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
349			      u32 oldval, u32 newval)
350{
351	struct page *page;
352	pte_t *pte;
353	int ret = -EFAULT;
354
355	if (!access_ok(uaddr, sizeof(*uaddr)))
356		return -EFAULT;
357
358	preempt_disable();
359	pte = maybe_map((unsigned long) uaddr, 1);
360	if (pte == NULL)
361		goto out_inatomic;
362
363	page = pte_page(*pte);
364#ifdef CONFIG_64BIT
365	pagefault_disable();
366	uaddr = page_address(page) + (((unsigned long) uaddr) & ~PAGE_MASK);
367#else
368	uaddr = kmap_atomic(page) + ((unsigned long) uaddr & ~PAGE_MASK);
369#endif
370
371	*uval = *uaddr;
372
373	ret = cmpxchg(uaddr, oldval, newval);
374
375#ifdef CONFIG_64BIT
376	pagefault_enable();
377#else
378	kunmap_atomic(uaddr);
379#endif
380	ret = 0;
381
382out_inatomic:
383	preempt_enable();
384	return ret;
385}
386EXPORT_SYMBOL(futex_atomic_cmpxchg_inatomic);
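
The kernel-doc above spells out the contract of futex_atomic_cmpxchg_inatomic(): it returns 0 when the user word could be accessed (with the value found there stored through @uval) and -EFAULT when it could not; whether the exchange actually happened is decided by comparing *uval with the expected old value. A hedged sketch of a caller honouring that contract follows; try_lock_user_futex() and its return conventions are hypothetical, only the helper's documented behaviour and pagefault_disable()/pagefault_enable() from <linux/uaccess.h> are assumed.

#include <linux/uaccess.h>
#include <linux/errno.h>
#include <asm/futex.h>

/*
 * Hypothetical caller: try to install @owner_tid into a user-space lock
 * word that is expected to be 0 when unlocked.  Only the documented
 * contract of futex_atomic_cmpxchg_inatomic() is relied upon.
 */
static int try_lock_user_futex(u32 __user *uaddr, u32 owner_tid)
{
	u32 cur;
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(&cur, uaddr, 0, owner_tid);
	pagefault_enable();

	if (ret)
		return ret;	/* -EFAULT: the word could not be mapped */
	if (cur != 0)
		return -EBUSY;	/* someone already owns the lock word */
	return 0;		/* exchange succeeded: we now own it */
}
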