// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/libnvdimm.h>

/*
 * Zero Userspace
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_fault();
	/* no memory constraint because it doesn't change any memory gcc knows
	   about */
	stac();
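	/*
	 * Clear size/8 qwords in the first loop, then the remaining
	 * size%8 bytes one at a time.  On a fault in the qword loop the
	 * fixup at label 3 recomputes the number of bytes still left to
	 * clear (remaining qwords * 8 + the byte remainder); a fault in
	 * the byte loop resumes at label 2 with the remaining byte count
	 * already in %rcx.  That count ends up in 'size' and is returned
	 * to the caller.
	 */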
	asm volatile(
		"	testq  %[size8],%[size8]\n"
		"	jz     4f\n"
		"0:	movq %[zero],(%[dst])\n"
		"	addq   %[eight],%[dst]\n"
		"	decl %%ecx ; jnz   0b\n"
		"4:	movq  %[size1],%%rcx\n"
		"	testl %%ecx,%%ecx\n"
		"	jz     2f\n"
		"1:	movb   %b[zero],(%[dst])\n"
		"	incq   %[dst]\n"
		"	decl %%ecx ; jnz  1b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	lea 0(%[size1],%[size8],8),%[size8]\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(0b,3b)
		_ASM_EXTABLE(1b,2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
		  [zero] "r" (0UL), [eight] "r" (8UL));
	clac();
	return size;
}
EXPORT_SYMBOL(__clear_user);

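/**
 * clear_user - Zero a block of memory in user space.
 * @to: Destination address, in user space.
 * @n:  Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Return: number of bytes that could not be cleared.
 * On success, this will be zero.
 */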
unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);

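/*
 * Copy a block of memory from one user-space buffer to another.
 * Returns the number of bytes that could not be copied; on success
 * this is zero.  If either range fails access_ok(), nothing is
 * copied and len is returned.
 */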
unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
{
	if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
		return copy_user_generic((__force void *)to, (__force void *)from, len);
	}
	return len;
}
EXPORT_SYMBOL(copy_in_user);

/*
 * Try to copy the last bytes and clear the rest if needed.
 * Since a protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 */
__visible unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len)
{
	for (; len; --len, to++) {
		char c;

		if (__get_user_nocheck(c, from++, sizeof(char)))
			break;
		if (__put_user_nocheck(c, to, sizeof(char)))
			break;
	}
	clac();

	/* If the destination is a kernel buffer, we always clear the end */
	if (!__addr_ok(to))
		memset(to, 0, len);
	return len;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}

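/*
 * arch_wb_cache_pmem - write back a range of persistent memory.
 * Thin wrapper around clean_cache_range(), called by the
 * libnvdimm/pmem code through the declaration in <linux/libnvdimm.h>.
 */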
void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

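/*
 * Copy from user space with the bulk of the transfer done via
 * non-temporal stores; any head/tail bytes that may have gone through
 * the cache are explicitly written back below, so the destination is
 * not left dirty in the CPU cache.  Returns the number of bytes that
 * could not be copied, as reported by __copy_user_nocache().
 */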
long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc;

	stac();
	rc = __copy_user_nocache(dst, src, size);
	clac();

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
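	/*
	 * Illustrative example (not in the original source): for an
	 * 8-byte-aligned, 13-byte copy the first 8 bytes go out with
	 * movnti, but the trailing 5 bytes may be written with cached
	 * stores, so the branch below writes back the cache line that
	 * holds the end of the destination.
	 */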
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}

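/*
 * __memcpy_flushcache - memcpy() variant that avoids leaving the
 * destination dirty in the CPU cache: the destination is first
 * aligned with a cached copy that is written back immediately, the
 * bulk is then copied with non-temporal (movnti) stores in 32-, 8-
 * and 4-byte chunks, and any remaining tail is again copied through
 * the cache and written back.
 */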
void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);
#endif /* CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE */