v3.1
 
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/module.h>
#include <asm/uaccess.h>

/*
 * Copy a null terminated string from userspace.
 */

#define __do_strncpy_from_user(dst,src,count,res)			   \
do {									   \
	long __d0, __d1, __d2;						   \
	might_fault();							   \
	__asm__ __volatile__(						   \
		"	testq %1,%1\n"					   \
		"	jz 2f\n"					   \
		"0:	lodsb\n"					   \
		"	stosb\n"					   \
		"	testb %%al,%%al\n"				   \
		"	jz 1f\n"					   \
		"	decq %1\n"					   \
		"	jnz 0b\n"					   \
		"1:	subq %1,%0\n"					   \
		"2:\n"							   \
		".section .fixup,\"ax\"\n"				   \
		"3:	movq %5,%0\n"					   \
		"	jmp 2b\n"					   \
		".previous\n"						   \
		_ASM_EXTABLE(0b,3b)					   \
		: "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),	   \
		  "=&D" (__d2)						   \
		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
		: "memory");						   \
} while (0)
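
/*
 * Annotated walk-through of the asm above (editor's note, assuming the
 * usual x86 string-instruction semantics): lodsb/stosb copy one byte
 * per iteration from %rsi (src) to %rdi (dst); the loop exits either
 * when the NUL byte has just been copied ("jz 1f") or when "count"
 * bytes have been transferred. "subq %1,%0" then turns the remaining
 * count into the number of bytes copied, excluding the terminating
 * NUL, while the .fixup entry forces the result to -EFAULT if the
 * load from userspace faults.
 */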

long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res;
	__do_strncpy_from_user(dst, src, count, res);
	return res;
}
EXPORT_SYMBOL(__strncpy_from_user);

long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = -EFAULT;
	if (access_ok(VERIFY_READ, src, 1))
		return __strncpy_from_user(dst, src, count);
	return res;
}
EXPORT_SYMBOL(strncpy_from_user);
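
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * copy a user-supplied, NUL-terminated name into a fixed-size kernel
 * buffer, treating a completely filled buffer as an overlong string.
 */
static long example_get_name(char *kbuf, const char __user *uname, long bufsize)
{
	long len = strncpy_from_user(kbuf, uname, bufsize);

	if (len < 0)
		return len;			/* -EFAULT */
	if (len == bufsize)
		return -ENAMETOOLONG;		/* no NUL within bufsize bytes */
	return len;				/* length excluding the NUL */
}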

/*
 * Zero Userspace
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_fault();
	/* no memory constraint because it doesn't change any memory gcc knows
	   about */
	asm volatile(
		"	testq  %[size8],%[size8]\n"
		"	jz     4f\n"
		"0:	movq %[zero],(%[dst])\n"
		"	addq   %[eight],%[dst]\n"
		"	decl %%ecx ; jnz   0b\n"
		"4:	movq  %[size1],%%rcx\n"
		"	testl %%ecx,%%ecx\n"
		"	jz     2f\n"
		"1:	movb   %b[zero],(%[dst])\n"
		"	incq   %[dst]\n"
		"	decl %%ecx ; jnz  1b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	lea 0(%[size1],%[size8],8),%[size8]\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(0b,3b)
		_ASM_EXTABLE(1b,2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
		  [zero] "r" (0UL), [eight] "r" (8UL));
	return size;
}
EXPORT_SYMBOL(__clear_user);
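
/*
 * How the asm above works (editor's note): the first loop stores
 * size/8 zero quadwords, the second clears the remaining size%8 bytes
 * one at a time. On a fault in the quadword loop, the fixup at label
 * 3 recomputes the bytes left as size1 + 8*size8; a fault in the byte
 * loop simply exits with the byte count still in %rcx. Either way the
 * function returns the number of bytes that could not be cleared,
 * and 0 on success.
 */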

unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);
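
/*
 * Usage sketch (hypothetical caller): after successfully copying
 * "copied" bytes into a user buffer of "len" bytes, zero the rest so
 * userspace never sees stale data.
 */
static int example_zero_tail(void __user *buf, unsigned long len, unsigned long copied)
{
	if (clear_user(buf + copied, len - copied))
		return -EFAULT;		/* some bytes were left uncleared */
	return 0;
}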

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than N if too long
 */

long __strnlen_user(const char __user *s, long n)
{
	long res = 0;
	char c;

	while (1) {
		if (res > n)
			return n + 1;
		if (__get_user(c, s))
			return 0;
		if (!c)
			return res + 1;
		res++;
		s++;
	}
}
EXPORT_SYMBOL(__strnlen_user);

long strnlen_user(const char __user *s, long n)
{
	if (!access_ok(VERIFY_READ, s, 1))
		return 0;
	return __strnlen_user(s, n);
}
EXPORT_SYMBOL(strnlen_user);
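
/*
 * Worked example of the return convention (editor's note): for the
 * user string "abc" (4 bytes including the NUL), strnlen_user(s, 16)
 * returns 4; strnlen_user(s, 2) returns 3, i.e. n+1, flagging the
 * string as too long; and any faulting access returns 0.
 */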

long strlen_user(const char __user *s)
{
	long res = 0;
	char c;

	for (;;) {
		if (get_user(c, s))
			return 0;
		if (!c)
			return res + 1;
		res++;
		s++;
	}
}
EXPORT_SYMBOL(strlen_user);
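
/*
 * Usage sketch (hypothetical, with assumed helpers from slab.h and
 * err.h): duplicate a user string into a freshly allocated kernel
 * buffer. The NUL is forced at the end because userspace may grow the
 * string between the two calls.
 */
static char *example_strdup_user(const char __user *s)
{
	long len = strlen_user(s);	/* includes the NUL, 0 on fault */
	char *buf;

	if (!len)
		return ERR_PTR(-EFAULT);
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	if (strncpy_from_user(buf, s, len) < 0) {
		kfree(buf);
		return ERR_PTR(-EFAULT);
	}
	buf[len - 1] = '\0';		/* defend against a racing resize */
	return buf;
}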

unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
{
	if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
		return copy_user_generic((__force void *)to, (__force void *)from, len);
	return len;
}
EXPORT_SYMBOL(copy_in_user);
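
/*
 * Usage sketch (hypothetical): move bytes between two userspace
 * buffers without bouncing through a kernel buffer, as compat syscall
 * wrappers commonly need to.
 */
static int example_user_to_user(void __user *dst, const void __user *src, unsigned len)
{
	if (copy_in_user(dst, src, len))
		return -EFAULT;		/* nonzero return == bytes not copied */
	return 0;
}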

/*
 * Try to copy last bytes and clear the rest if needed.
 * Since a protection fault in copy_from/to_user is not a normal
 * situation, it is not necessary to optimize tail handling.
 */
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
{
	char c;
	unsigned zero_len;

	for (; len; --len) {
		if (__get_user_nocheck(c, from++, sizeof(char)))
			break;
		if (__put_user_nocheck(c, to++, sizeof(char)))
			break;
	}

	for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
		if (__put_user_nocheck(c, to++, sizeof(char)))
			break;
	return len;
}
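
/*
 * Intended call site, sketched (editor's note): when the optimized
 * copy_user_generic() path faults partway through a transfer, its
 * exception fixup falls back to this byte-at-a-time loop to salvage
 * whatever is still accessible and, with "zerorest" set, to zero the
 * destination bytes that could not be read, so copy_from_user()
 * callers never observe uninitialized memory.
 */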
v6.9.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/libnvdimm.h>

/*
 * Zero Userspace
 */

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
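
/*
 * Worked example (editor's note, assuming 64-byte cache lines): for
 * addr == 0x1038 and size == 0x10, clflush_mask is 0x3f, so the loop
 * starts at p == 0x1000 and issues CLWB on the two lines at 0x1000
 * and 0x1040 that together cover the range 0x1038..0x1047.
 */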

void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc;

	stac();
	rc = __copy_user_nocache(dst, src, size);
	clac();

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}
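
/*
 * Worked example of the head/tail flushing above (editor's note,
 * assuming 64-byte cache lines): for dst == 0x1003 and size == 0x40,
 * the destination is not 8-byte aligned, so the line at 0x1000 is
 * written back explicitly; "flushed" is then 0x3d (bytes up to the
 * 0x1040 line boundary), and because the remaining 0x03 bytes are not
 * a multiple of 8, the line holding the final byte at 0x1042 is
 * written back as well.
 */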

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);
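
/*
 * Usage sketch (hypothetical caller): write a record into a
 * pmem-backed mapping with non-temporal stores, so no separate cache
 * flush is needed afterwards; on x86, wmb() provides the sfence that
 * orders the non-temporal stores against later writes.
 */
static void example_pmem_write(void *pmem_dst, const void *src, size_t len)
{
	__memcpy_flushcache(pmem_dst, src, len);
	wmb();		/* order the non-temporal stores */
}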
#endif