/* SPDX-License-Identifier: MIT */
1/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include <linux/kernel.h>
26#include <linux/string.h>
27#include <linux/cpufeature.h>
28#include <asm/fpu/api.h>
29
30#include "i915_memcpy.h"
31
/*
 * CI_BUG_ON(): a BUG_ON() that is only live in CONFIG_DRM_I915_DEBUG builds.
 * In production builds the expression is merely syntax-checked (never
 * evaluated) via BUILD_BUG_ON_INVALID(), so the assertion has zero runtime
 * cost while still catching bit-rot in the condition at compile time.
 */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define CI_BUG_ON(expr) BUG_ON(expr)
#else
#define CI_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

/*
 * Static-key gate for the MOVNTDQA fast path; flipped on once at init by
 * i915_memcpy_init_early() when the CPU supports it (see below).
 */
static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
39
40static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
41{
42 kernel_fpu_begin();
43
44 while (len >= 4) {
45 asm("movntdqa (%0), %%xmm0\n"
46 "movntdqa 16(%0), %%xmm1\n"
47 "movntdqa 32(%0), %%xmm2\n"
48 "movntdqa 48(%0), %%xmm3\n"
49 "movaps %%xmm0, (%1)\n"
50 "movaps %%xmm1, 16(%1)\n"
51 "movaps %%xmm2, 32(%1)\n"
52 "movaps %%xmm3, 48(%1)\n"
53 :: "r" (src), "r" (dst) : "memory");
54 src += 64;
55 dst += 64;
56 len -= 4;
57 }
58 while (len--) {
59 asm("movntdqa (%0), %%xmm0\n"
60 "movaps %%xmm0, (%1)\n"
61 :: "r" (src), "r" (dst) : "memory");
62 src += 16;
63 dst += 16;
64 }
65
66 kernel_fpu_end();
67}
68
69static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len)
70{
71 kernel_fpu_begin();
72
73 while (len >= 4) {
74 asm("movntdqa (%0), %%xmm0\n"
75 "movntdqa 16(%0), %%xmm1\n"
76 "movntdqa 32(%0), %%xmm2\n"
77 "movntdqa 48(%0), %%xmm3\n"
78 "movups %%xmm0, (%1)\n"
79 "movups %%xmm1, 16(%1)\n"
80 "movups %%xmm2, 32(%1)\n"
81 "movups %%xmm3, 48(%1)\n"
82 :: "r" (src), "r" (dst) : "memory");
83 src += 64;
84 dst += 64;
85 len -= 4;
86 }
87 while (len--) {
88 asm("movntdqa (%0), %%xmm0\n"
89 "movups %%xmm0, (%1)\n"
90 :: "r" (src), "r" (dst) : "memory");
91 src += 16;
92 dst += 16;
93 }
94
95 kernel_fpu_end();
96}
97
98/**
99 * i915_memcpy_from_wc: perform an accelerated *aligned* read from WC
100 * @dst: destination pointer
101 * @src: source pointer
102 * @len: how many bytes to copy
103 *
104 * i915_memcpy_from_wc copies @len bytes from @src to @dst using
105 * non-temporal instructions where available. Note that all arguments
106 * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
107 * of 16.
108 *
109 * To test whether accelerated reads from WC are supported, use
110 * i915_memcpy_from_wc(NULL, NULL, 0);
111 *
112 * Returns true if the copy was successful, false if the preconditions
113 * are not met.
114 */
115bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
116{
117 if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
118 return false;
119
120 if (static_branch_likely(&has_movntdqa)) {
121 if (likely(len))
122 __memcpy_ntdqa(dst, src, len >> 4);
123 return true;
124 }
125
126 return false;
127}
128
/**
 * i915_unaligned_memcpy_from_wc: perform a mostly accelerated read from WC
 * @dst: destination pointer
 * @src: source pointer
 * @len: how many bytes to copy
 *
 * Like i915_memcpy_from_wc(), the unaligned variant copies @len bytes from
 * @src to @dst using non-temporal instructions where available, but
 * accepts that its arguments may not be aligned, but are valid for the
 * potential 16-byte read past the end.
 */
void i915_unaligned_memcpy_from_wc(void *dst, const void *src, unsigned long len)
{
	unsigned long addr = (unsigned long)src;

	CI_BUG_ON(!i915_has_memcpy_from_wc());

	if (!IS_ALIGNED(addr, 16)) {
		/*
		 * Copy the unaligned head byte-wise so the streaming
		 * loop below starts on a 16-byte source boundary.
		 */
		unsigned long head = min(ALIGN(addr, 16) - addr, len);

		memcpy(dst, src, head);

		dst += head;
		src += head;
		len -= head;
	}

	/* DIV_ROUND_UP: the final partial block relies on the 16-byte overread. */
	if (likely(len))
		__memcpy_ntdqu(dst, src, DIV_ROUND_UP(len, 16));
}
160
161void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
162{
163 /*
164 * Some hypervisors (e.g. KVM) don't support VEX-prefix instructions
165 * emulation. So don't enable movntdqa in hypervisor guest.
166 */
167 if (static_cpu_has(X86_FEATURE_XMM4_1) &&
168 !boot_cpu_has(X86_FEATURE_HYPERVISOR))
169 static_branch_enable(&has_movntdqa);
170}