#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
	unsigned long d0, d1, d2;
	asm volatile("rep ; movsl\n\t"		/* copy n/4 dwords */
		     "testb $2,%b4\n\t"		/* two trailing bytes left? */
		     "je 1f\n\t"
		     "movsw\n"
		     "1:\ttestb $1,%b4\n\t"	/* one trailing byte left? */
		     "je 2f\n\t"
		     "movsb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}
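
/*
 * Illustrative note (not part of the original header): the asm above
 * decomposes the count, e.g. for n == 7 it executes one movsl (4
 * bytes), one movsw (2 bytes) and one movsb (1 byte), since bits 1
 * and 0 of n are set.
 */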

/* Even with __builtin_ the compiler may decide to use the out-of-line
   function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len)					\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = __memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})
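
/*
 * Illustrative dispatch (not part of the original header):
 *
 *	memcpy(dst, src, 16);	constant < 64: __builtin_memcpy, inlined
 *	memcpy(dst, src, 256);	constant >= 64: out-of-line __memcpy
 *	memcpy(dst, src, n);	variable length: __builtin_memcpy
 */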
#endif
#else
/*
 * kmemcheck works best if we use the REP instructions unconditionally,
 * because then it knows both memory operands in advance.
 */
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the non-instrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)
#endif

__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst: destination address
 * @src: source address
 * @cnt: number of bytes to copy
 *
 * Low level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return: 0 for success, -EFAULT on failure.
 */
static __always_inline __must_check int
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return memcpy_mcsafe_unrolled(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}
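
/*
 * Illustrative usage (not part of the original header): a pmem-style
 * caller turns a machine check consumed during the copy into an I/O
 * error instead of a crash, e.g.
 *
 *	if (memcpy_mcsafe(buf, pmem_addr, len))
 *		return -EIO;
 */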

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Even with __builtin_ the compiler may decide to use the out-of-line
   function. */

#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
#include <linux/kmsan_string.h>
#endif

#define __HAVE_ARCH_MEMCPY 1
#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
#undef memcpy
#define memcpy __msan_memcpy
#else
extern void *memcpy(void *to, const void *from, size_t len);
#endif
extern void *__memcpy(void *to, const void *from, size_t len);

#define __HAVE_ARCH_MEMSET
#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
extern void *__msan_memset(void *s, int c, size_t n);
#undef memset
#define memset __msan_memset
#else
void *memset(void *s, int c, size_t n);
#endif
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;
	/* "rep stosw": store n copies of the 16-bit value v at s. */
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;
	/* "rep stosl": store n copies of the 32-bit value v at s. */
	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;
	/* "rep stosq": store n copies of the 64-bit value v at s. */
	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
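
/*
 * Illustrative usage (not part of the original header): n counts
 * elements, not bytes. E.g. filling one 16bpp scanline with a single
 * pixel value (hypothetical variables):
 *
 *	memset16(line, rgb565_pixel, width_in_pixels);
 */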

#define __HAVE_ARCH_MEMMOVE
#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
#undef memmove
void *__msan_memmove(void *dest, const void *src, size_t len);
#define memmove __msan_memmove
#else
void *memmove(void *dest, const void *src, size_t count);
#endif
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__))
/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the non-instrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#undef memmove
#define memmove(dst, src, len) __memmove(dst, src, len)
#undef memset
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
static __always_inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	if (__builtin_constant_p(cnt)) {
		/* Small constant sizes: use inline non-temporal stores. */
		switch (cnt) {
		case 4:
			asm ("movntil %1, %0" : "=m"(*(u32 *)dst) : "r"(*(u32 *)src));
			return;
		case 8:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			return;
		case 16:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			asm ("movntiq %1, %0" : "=m"(*(u64 *)(dst + 8)) : "r"(*(u64 *)(src + 8)));
			return;
		}
	}
	__memcpy_flushcache(dst, src, cnt);
}
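
/*
 * Illustrative usage (not part of the original header): persistent
 * memory writers use this to avoid leaving dirty cachelines behind,
 * e.g.
 *
 *	memcpy_flushcache(pmem_addr, buf, len);
 */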
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */