/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

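/* Write a dword into the object via a kmap of the backing shmem page,
 * flushing the cacheline by hand whenever the shmem preparation step
 * reports that a clflush is needed.
 */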
static int cpu_set(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 v)
{
	unsigned int needs_clflush;
	struct page *page;
	u32 *map;
	int err;

	err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	if (err)
		return err;

	page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	map = kmap_atomic(page);
	if (needs_clflush & CLFLUSH_BEFORE)
		clflush(map + offset_in_page(offset) / sizeof(*map));
	map[offset_in_page(offset) / sizeof(*map)] = v;
	if (needs_clflush & CLFLUSH_AFTER)
		clflush(map + offset_in_page(offset) / sizeof(*map));
	kunmap_atomic(map);

	i915_gem_obj_finish_shmem_access(obj);
	return 0;
}

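/* Read a dword back via a kmap of the backing shmem page, flushing the
 * cacheline first if it may hold stale data.
 */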
static int cpu_get(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 *v)
{
	unsigned int needs_clflush;
	struct page *page;
	u32 *map;
	int err;

	err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (err)
		return err;

	page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	map = kmap_atomic(page);
	if (needs_clflush & CLFLUSH_BEFORE)
		clflush(map + offset_in_page(offset) / sizeof(*map));
	*v = map[offset_in_page(offset) / sizeof(*map)];
	kunmap_atomic(map);

	i915_gem_obj_finish_shmem_access(obj);
	return 0;
}

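/* Write a dword through an iomapping of the object pinned into the
 * mappable GGTT aperture.
 */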
static int gtt_set(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 v)
{
	struct i915_vma *vma;
	u32 __iomem *map;
	int err;

	err = i915_gem_object_set_to_gtt_domain(obj, true);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map))
		return PTR_ERR(map);

	iowrite32(v, &map[offset / sizeof(*map)]);
	i915_vma_unpin_iomap(vma);

	return 0;
}

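/* Read a dword back through the GGTT aperture iomapping. */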
static int gtt_get(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 *v)
{
	struct i915_vma *vma;
	u32 __iomem *map;
	int err;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map))
		return PTR_ERR(map);

	*v = ioread32(&map[offset / sizeof(*map)]);
	i915_vma_unpin_iomap(vma);

	return 0;
}

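/* Write a dword through a CPU write-combining (WC) mapping of the
 * object's pages.
 */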
static int wc_set(struct drm_i915_gem_object *obj,
		  unsigned long offset,
		  u32 v)
{
	u32 *map;
	int err;

	err = i915_gem_object_set_to_wc_domain(obj, true);
	if (err)
		return err;

	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map[offset / sizeof(*map)] = v;
	i915_gem_object_unpin_map(obj);

	return 0;
}

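/* Read a dword back through a CPU write-combining (WC) mapping. */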
static int wc_get(struct drm_i915_gem_object *obj,
		  unsigned long offset,
		  u32 *v)
{
	u32 *map;
	int err;

	err = i915_gem_object_set_to_wc_domain(obj, false);
	if (err)
		return err;

	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	*v = map[offset / sizeof(*map)];
	i915_gem_object_unpin_map(obj);

	return 0;
}

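/* Write a dword using the GPU: emit MI_STORE_DWORD_IMM on the render
 * engine targeting the object's GGTT address, and install the request
 * as an exclusive fence so that later access waits for the write.
 */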
static int gpu_set(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 v)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	err = i915_gem_object_set_to_gtt_domain(obj, true);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
	if (IS_ERR(rq)) {
		i915_vma_unpin(vma);
		return PTR_ERR(rq);
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		__i915_request_add(rq, false);
		i915_vma_unpin(vma);
		return PTR_ERR(cs);
	}

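	/* MI_STORE_DWORD_IMM is encoded differently across generations:
	 * gen8+ takes a 64-bit GGTT address split into low/high dwords,
	 * gen4-gen7 emit a zero dword followed by a 32-bit address, and
	 * older parts use the original opcode padded with a MI_NOOP, so
	 * each branch emits exactly the 4 dwords reserved above.
	 */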
	if (INTEL_GEN(i915) >= 8) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
		*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = v;
	} else if (INTEL_GEN(i915) >= 4) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
		*cs++ = 0;
		*cs++ = i915_ggtt_offset(vma) + offset;
		*cs++ = v;
	} else {
		*cs++ = MI_STORE_DWORD_IMM | 1 << 22;
		*cs++ = i915_ggtt_offset(vma) + offset;
		*cs++ = v;
		*cs++ = MI_NOOP;
	}
	intel_ring_advance(rq, cs);

	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unpin(vma);

	reservation_object_lock(obj->resv, NULL);
	reservation_object_add_excl_fence(obj->resv, &rq->fence);
	reservation_object_unlock(obj->resv);

	__i915_request_add(rq, true);

	return 0;
}

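/* Validity predicates: "cpu", "gtt" and "wc" work everywhere, whereas
 * the "gpu" mode requires an engine that can safely use
 * MI_STORE_DWORD_IMM.
 */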
static bool always_valid(struct drm_i915_private *i915)
{
	return true;
}

static bool needs_mi_store_dword(struct drm_i915_private *i915)
{
	return intel_engine_can_store_dword(i915->engine[RCS]);
}

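/* Table of coherency modes: each entry names a domain and provides a
 * setter and/or getter plus a validity check; note that "gpu" has no
 * getter and so is only ever used to write or overwrite values.
 */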
static const struct igt_coherency_mode {
	const char *name;
	int (*set)(struct drm_i915_gem_object *, unsigned long offset, u32 v);
	int (*get)(struct drm_i915_gem_object *, unsigned long offset, u32 *v);
	bool (*valid)(struct drm_i915_private *i915);
} igt_coherency_mode[] = {
	{ "cpu", cpu_set, cpu_get, always_valid },
	{ "gtt", gtt_set, gtt_get, always_valid },
	{ "wc", wc_set, wc_get, always_valid },
	{ "gpu", gpu_set, NULL, needs_mi_store_dword },
	{ },
};

static int igt_gem_coherency(void *arg)
{
	const unsigned int ncachelines = PAGE_SIZE / 64;
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	const struct igt_coherency_mode *read, *write, *over;
	struct drm_i915_gem_object *obj;
	unsigned long count, n;
	u32 *offsets, *values;
	int err = 0;

	/* We repeatedly write, overwrite and read from a sequence of
	 * cachelines in order to try and detect incoherency (unflushed writes
	 * from either the CPU or GPU). Each setter/getter uses our cache
	 * domain API which should prevent incoherency.
	 */

	offsets = kmalloc_array(ncachelines, 2 * sizeof(u32), GFP_KERNEL);
	if (!offsets)
		return -ENOMEM;
	for (count = 0; count < ncachelines; count++)
		offsets[count] = count * 64 + 4 * (count % 16);

	values = offsets + ncachelines;

	mutex_lock(&i915->drm.struct_mutex);
	for (over = igt_coherency_mode; over->name; over++) {
		if (!over->set)
			continue;

		if (!over->valid(i915))
			continue;

		for (write = igt_coherency_mode; write->name; write++) {
			if (!write->set)
				continue;

			if (!write->valid(i915))
				continue;

			for (read = igt_coherency_mode; read->name; read++) {
				if (!read->get)
					continue;

				if (!read->valid(i915))
					continue;

				for_each_prime_number_from(count, 1, ncachelines) {
					obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
					if (IS_ERR(obj)) {
						err = PTR_ERR(obj);
						goto unlock;
					}

					i915_random_reorder(offsets, ncachelines, &prng);
					for (n = 0; n < count; n++)
						values[n] = prandom_u32_state(&prng);

					for (n = 0; n < count; n++) {
						err = over->set(obj, offsets[n], ~values[n]);
						if (err) {
							pr_err("Failed to set stale value[%lu/%lu] in object using %s, err=%d\n",
							       n, count, over->name, err);
							goto put_object;
						}
					}

					for (n = 0; n < count; n++) {
						err = write->set(obj, offsets[n], values[n]);
						if (err) {
							pr_err("Failed to set value[%lu/%lu] in object using %s, err=%d\n",
							       n, count, write->name, err);
							goto put_object;
						}
					}

					for (n = 0; n < count; n++) {
						u32 found;

						err = read->get(obj, offsets[n], &found);
						if (err) {
							pr_err("Failed to get value[%lu/%lu] in object using %s, err=%d\n",
							       n, count, read->name, err);
							goto put_object;
						}

						if (found != values[n]) {
							pr_err("Value[%lu/%lu] mismatch, (overwrite with %s) wrote [%s] %x read [%s] %x (inverse %x), at offset %x\n",
							       n, count, over->name,
							       write->name, values[n],
							       read->name, found,
							       ~values[n], offsets[n]);
							err = -EINVAL;
							goto put_object;
						}
					}

					__i915_gem_object_release_unless_active(obj);
				}
			}
		}
	}
unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	kfree(offsets);
	return err;

put_object:
	__i915_gem_object_release_unless_active(obj);
	goto unlock;
}

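/* Entry point for the i915 live selftest harness. */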
int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_coherency),
	};

	return i915_subtests(tests, i915);
}