/*
 * Access kernel memory without faulting.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

/**
 * probe_kernel_read(): safely attempt to read from a location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst. If a kernel fault
 * happens, handle that and return -EFAULT.
 *
 * We ensure that the copy_from_user is executed in atomic context so that
 * do_page_fault() doesn't attempt to take mmap_sem. This makes
 * probe_kernel_read() suitable for use within regions where the caller
 * already holds mmap_sem, or other locks which nest inside mmap_sem.
 */

long __weak probe_kernel_read(void *dst, const void *src, size_t size)
    __attribute__((alias("__probe_kernel_read")));

long __probe_kernel_read(void *dst, const void *src, size_t size)
{
	long ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	pagefault_disable();
	ret = __copy_from_user_inatomic(dst,
			(__force const void __user *)src, size);
	pagefault_enable();
	set_fs(old_fs);

	return ret ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(probe_kernel_read);
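
/*
 * Example (illustrative sketch only): probe_kernel_read() is the safe way to
 * follow a kernel pointer that may be stale or corrupt, e.g. from debugging
 * or tracing code. "ptr" below is a hypothetical, possibly-bogus kernel
 * pointer supplied by the caller:
 *
 *	unsigned long val;
 *
 *	if (probe_kernel_read(&val, ptr, sizeof(val)))
 *		pr_warn("address %px is not readable\n", ptr);
 *	else
 *		pr_info("read %#lx\n", val);
 */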

/**
 * probe_kernel_write(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src. If a kernel fault
 * happens, handle that and return -EFAULT.
 */
long __weak probe_kernel_write(void *dst, const void *src, size_t size)
    __attribute__((alias("__probe_kernel_write")));

long __probe_kernel_write(void *dst, const void *src, size_t size)
{
	long ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	pagefault_disable();
	ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
	pagefault_enable();
	set_fs(old_fs);

	return ret ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(probe_kernel_write);

/**
 * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
 * @dst: Destination address, in kernel space. This buffer must be at
 *	least @count bytes long.
 * @unsafe_addr: Unsafe address.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from unsafe address to kernel buffer.
 *
 * On success, returns the length of the string INCLUDING the trailing NUL.
 *
 * If access fails, returns -EFAULT (some data may have been copied
 * and the trailing NUL added).
 *
 * If @count is smaller than the length of the string, copies @count-1 bytes,
 * sets the last byte of @dst buffer to NUL and returns @count.
 */
long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
{
	mm_segment_t old_fs = get_fs();
	const void *src = unsafe_addr;
	long ret;

	if (unlikely(count <= 0))
		return 0;

	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __copy_from_user_inatomic(dst++,
				(const void __user __force *)src++, 1);
	} while (dst[-1] && ret == 0 && src - unsafe_addr < count);

	dst[-1] = '\0';
	pagefault_enable();
	set_fs(old_fs);

	return ret ? -EFAULT : src - unsafe_addr;
}

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Access kernel or user memory without faulting.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>

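/*
 * Architectures can override this __weak stub to reject source ranges that
 * must never be probed, for example user-space addresses on architectures
 * where kernel and user pointers can overlap. A false return makes
 * copy_from_kernel_nofault() fail with -ERANGE instead of touching the
 * address at all.
 */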
bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
		size_t size)
{
	return true;
}

/*
 * The below only uses kmsan_check_memory() to ensure uninitialized kernel
 * memory isn't leaked.
 */
#define copy_from_kernel_nofault_loop(dst, src, len, type, err_label)	\
	while (len >= sizeof(type)) {					\
		__get_kernel_nofault(dst, src, type, err_label);	\
		kmsan_check_memory(src, sizeof(type));			\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}

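/**
 * copy_from_kernel_nofault(): safely attempt to read from kernel-space
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from kernel address @src to the buffer at @dst. If a kernel
 * fault happens, handle that and return -EFAULT. If @src is rejected by
 * copy_from_kernel_nofault_allowed(), return -ERANGE.
 */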
long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
{
	unsigned long align = 0;

	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		align = (unsigned long)dst | (unsigned long)src;

	if (!copy_from_kernel_nofault_allowed(src, size))
		return -ERANGE;

	pagefault_disable();
	if (!(align & 7))
		copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
	if (!(align & 3))
		copy_from_kernel_nofault_loop(dst, src, size, u32, Efault);
	if (!(align & 1))
		copy_from_kernel_nofault_loop(dst, src, size, u16, Efault);
	copy_from_kernel_nofault_loop(dst, src, size, u8, Efault);
	pagefault_enable();
	return 0;
Efault:
	pagefault_enable();
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(copy_from_kernel_nofault);
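
/*
 * Example (illustrative sketch): copy_from_kernel_nofault() is what e.g. BPF
 * helpers and crash/oops dumpers use to follow kernel pointers that may be
 * stale or corrupted. "maybe_bad_ptr" below is a hypothetical pointer
 * supplied by the caller; a non-zero return means the address faulted or was
 * disallowed, and the buffer contents must not be trusted:
 *
 *	struct list_head head;
 *
 *	if (copy_from_kernel_nofault(&head, maybe_bad_ptr, sizeof(head)))
 *		return;
 */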

#define copy_to_kernel_nofault_loop(dst, src, len, type, err_label)	\
	while (len >= sizeof(type)) {					\
		__put_kernel_nofault(dst, src, type, err_label);	\
		instrument_write(dst, sizeof(type));			\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}

long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
{
	unsigned long align = 0;

	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		align = (unsigned long)dst | (unsigned long)src;

	pagefault_disable();
	if (!(align & 7))
		copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
	if (!(align & 3))
		copy_to_kernel_nofault_loop(dst, src, size, u32, Efault);
	if (!(align & 1))
		copy_to_kernel_nofault_loop(dst, src, size, u16, Efault);
	copy_to_kernel_nofault_loop(dst, src, size, u8, Efault);
	pagefault_enable();
	return 0;
Efault:
	pagefault_enable();
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(copy_to_kernel_nofault);

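/**
 * strncpy_from_kernel_nofault: - Copy a NUL terminated string from unsafe
 *				  kernel address.
 * @dst: Destination address, in kernel space. This buffer must be at
 *	least @count bytes long.
 * @unsafe_addr: Unsafe kernel address.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from unsafe kernel address to kernel buffer.
 *
 * On success, returns the length of the string INCLUDING the trailing NUL.
 *
 * If access fails, returns -EFAULT (some data may have been copied and the
 * trailing NUL added). If @unsafe_addr is rejected by
 * copy_from_kernel_nofault_allowed(), returns -ERANGE.
 *
 * If @count is smaller than the length of the string, copies @count-1 bytes,
 * sets the last byte of @dst buffer to NUL and returns @count.
 */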
long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
{
	const void *src = unsafe_addr;

	if (unlikely(count <= 0))
		return 0;
	if (!copy_from_kernel_nofault_allowed(unsafe_addr, count))
		return -ERANGE;

	pagefault_disable();
	do {
		__get_kernel_nofault(dst, src, u8, Efault);
		dst++;
		src++;
	} while (dst[-1] && src - unsafe_addr < count);
	pagefault_enable();

	dst[-1] = '\0';
	return src - unsafe_addr;
Efault:
	pagefault_enable();
	dst[0] = '\0';
	return -EFAULT;
}

/**
 * copy_from_user_nofault(): safely attempt to read from a user-space location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from. This must be a user address.
 * @size: size of the data chunk
 *
 * Safely read from user address @src to the buffer at @dst. If a kernel fault
 * happens, handle that and return -EFAULT.
 */
long copy_from_user_nofault(void *dst, const void __user *src, size_t size)
{
	long ret = -EFAULT;

	if (!__access_ok(src, size))
		return ret;

	if (!nmi_uaccess_okay())
		return ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, size);
	pagefault_enable();

	if (ret)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(copy_from_user_nofault);
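
/*
 * Example (illustrative sketch): reading a word from user memory in a
 * context where taking a page fault is forbidden, such as a tracepoint or
 * NMI/perf handler. "uaddr" below is a hypothetical __user pointer taken
 * from e.g. saved registers:
 *
 *	unsigned long word;
 *
 *	if (copy_from_user_nofault(&word, uaddr, sizeof(word)))
 *		word = 0;
 */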

/**
 * copy_to_user_nofault(): safely attempt to write to a user-space location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src. If a kernel fault
 * happens, handle that and return -EFAULT.
 */
long copy_to_user_nofault(void __user *dst, const void *src, size_t size)
{
	long ret = -EFAULT;

	if (access_ok(dst, size)) {
		pagefault_disable();
		ret = __copy_to_user_inatomic(dst, src, size);
		pagefault_enable();
	}

	if (ret)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(copy_to_user_nofault);

/**
 * strncpy_from_user_nofault: - Copy a NUL terminated string from unsafe user
 *				address.
 * @dst: Destination address, in kernel space. This buffer must be at
 *	least @count bytes long.
 * @unsafe_addr: Unsafe user address.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from unsafe user address to kernel buffer.
 *
 * On success, returns the length of the string INCLUDING the trailing NUL.
 *
 * If access fails, returns -EFAULT (some data may have been copied
 * and the trailing NUL added).
 *
 * If @count is smaller than the length of the string, copies @count-1 bytes,
 * sets the last byte of @dst buffer to NUL and returns @count.
 */
long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count)
{
	long ret;

	if (unlikely(count <= 0))
		return 0;

	pagefault_disable();
	ret = strncpy_from_user(dst, unsafe_addr, count);
	pagefault_enable();

	if (ret >= count) {
		ret = count;
		dst[ret - 1] = '\0';
	} else if (ret > 0) {
		ret++;
	}

	return ret;
}
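
/*
 * Example (illustrative sketch): the returned length counts the trailing
 * NUL, and a return value equal to @count means @dst was filled completely
 * (the string may have been truncated). "user_str" below is a hypothetical
 * __user pointer:
 *
 *	char buf[64];
 *	long len = strncpy_from_user_nofault(buf, user_str, sizeof(buf));
 *
 *	if (len < 0)
 *		pr_debug("string not readable without faulting\n");
 */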

/**
 * strnlen_user_nofault: - Get the size of a user string INCLUDING final NUL.
 * @unsafe_addr: The string to measure.
 * @count: Maximum count (including NUL)
 *
 * Get the size of a NUL-terminated string in user space without pagefault.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 *
 * If the string is too long, returns a number larger than @count. User
 * has to check the return value against "> count".
 * On exception (or invalid count), returns 0.
 *
 * Unlike strnlen_user, this can be used from IRQ handler etc. because
 * it disables pagefaults.
 */
long strnlen_user_nofault(const void __user *unsafe_addr, long count)
{
	int ret;

	pagefault_disable();
	ret = strnlen_user(unsafe_addr, count);
	pagefault_enable();

	return ret;
}
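
/*
 * Example (illustrative sketch): sizing a user string from a non-faulting
 * context. Note the "> count" check for overlong strings and the 0 return
 * on fault. "user_str" below is a hypothetical __user pointer:
 *
 *	long len = strnlen_user_nofault(user_str, 256);
 *
 *	if (len == 0 || len > 256)
 *		return -EFAULT;
 */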

void __copy_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
EXPORT_SYMBOL(__copy_overflow);