/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008 Intel Corporation
 */

#include <linux/string.h>
#include <linux/bitops.h>

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"

/**
 * DOC: buffer object tiling
 *
 * i915_gem_set_tiling_ioctl() and i915_gem_get_tiling_ioctl() are the
 * userspace interface to declare fence register requirements.
 *
 * In principle GEM doesn't care at all about the internal data layout of an
 * object, and hence it also doesn't care about tiling or swizzling. There are
 * two exceptions:
 *
 * - For X and Y tiling the hardware provides detilers for CPU access, so called
 *   fences. Since there's only a limited amount of them the kernel must manage
 *   these, and therefore userspace must tell the kernel the object tiling if it
 *   wants to use fences for detiling.
 * - gen3 and gen4 platforms have a swizzling pattern for tiled objects which
 *   depends upon the physical page frame number. When swapping such objects the
 *   page frame number might change and the kernel must be able to fix this up
 *   and hence must know the tiling. Note that on a subset of platforms with
 *   asymmetric memory channel population the swizzling pattern changes in an
 *   unknown way, and for those the kernel simply forbids swapping completely.
 *
 * Since neither of these applies to new tiling layouts on modern platforms like
 * W, Ys and Yf tiling, GEM only allows object tiling to be set to X or Y tiled.
 * Anything else can be handled in userspace entirely without the kernel's
 * involvement.
 */
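
/*
 * Example (illustrative sketch, not part of this file): how userspace might
 * declare X tiling for a buffer object through the set_tiling ioctl; fd,
 * bo_handle and the stride of 4096 are assumptions for illustration.
 *
 *	struct drm_i915_gem_set_tiling set_tiling = {
 *		.handle = bo_handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = 4096,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling))
 *		return -errno;
 */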

/**
 * i915_gem_fence_size - required global GTT size for a fence
 * @i915: i915 device
 * @size: object size
 * @tiling: tiling mode
 * @stride: tiling stride
 *
 * Return the required global GTT size for a fence (view of a tiled object),
 * taking into account potential fence register mapping.
 */
u32 i915_gem_fence_size(struct drm_i915_private *i915,
			u32 size, unsigned int tiling, unsigned int stride)
{
	u32 ggtt_size;

	GEM_BUG_ON(!size);

	if (tiling == I915_TILING_NONE)
		return size;

	GEM_BUG_ON(!stride);

	if (GRAPHICS_VER(i915) >= 4) {
		stride *= i915_gem_tile_height(tiling);
		GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE));
		return roundup(size, stride);
	}

	/* Previous chips need a power-of-two fence region when tiling */
	if (GRAPHICS_VER(i915) == 3)
		ggtt_size = 1024*1024;
	else
		ggtt_size = 512*1024;

	while (ggtt_size < size)
		ggtt_size <<= 1;

	return ggtt_size;
}
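
/*
 * Worked example (values purely illustrative): on gen3 the fence region must
 * be a power of two, so a 700KiB tiled object gets the 1MiB minimum while a
 * 1.5MiB object is rounded up to 2MiB. On gen4+ the size is instead rounded
 * up to a whole number of tile rows (stride * tile height).
 */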

/**
 * i915_gem_fence_alignment - required global GTT alignment for a fence
 * @i915: i915 device
 * @size: object size
 * @tiling: tiling mode
 * @stride: tiling stride
 *
 * Return the required global GTT alignment for a fence (a view of a tiled
 * object), taking into account potential fence register mapping.
 */
u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size,
			     unsigned int tiling, unsigned int stride)
{
	GEM_BUG_ON(!size);

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (tiling == I915_TILING_NONE)
		return I915_GTT_MIN_ALIGNMENT;

	if (GRAPHICS_VER(i915) >= 4)
		return I965_FENCE_PAGE;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_fence_size(i915, size, tiling, stride);
}
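
/*
 * For example (illustrative): on gen2/3 a tiled object must be bound at an
 * offset aligned to its power-of-two fence size from above, whereas on gen4+
 * the 4KiB I965_FENCE_PAGE alignment suffices and untiled objects only need
 * I915_GTT_MIN_ALIGNMENT.
 */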

/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_i915_gem_object *obj,
	       unsigned int tiling, unsigned int stride)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int tile_width;

	/* Linear is always fine */
	if (tiling == I915_TILING_NONE)
		return true;

	if (tiling > I915_TILING_LAST)
		return false;

	/* check maximum stride & object size */
	/* i965+ stores the end address of the gtt mapping in the fence
	 * reg, so don't bother to check the size */
	if (GRAPHICS_VER(i915) >= 7) {
		if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
			return false;
	} else if (GRAPHICS_VER(i915) >= 4) {
		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
			return false;
	} else {
		if (stride > 8192)
			return false;

		if (!is_power_of_2(stride))
			return false;
	}

	if (GRAPHICS_VER(i915) == 2 ||
	    (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
		tile_width = 128;
	else
		tile_width = 512;

	if (!stride || !IS_ALIGNED(stride, tile_width))
		return false;

	return true;
}
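
/*
 * Illustrative outcomes of the checks above (not exhaustive): on gen4+ an
 * X-tiled stride of 512 passes (tile_width aligned), a stride of 600 fails
 * the alignment check, and on gen2/3 a stride of 3072 fails because it is
 * not a power of two even though it is under the 8192 limit.
 */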

static bool i915_vma_fence_prepare(struct i915_vma *vma,
				   int tiling_mode, unsigned int stride)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	u32 size, alignment;

	if (!i915_vma_is_map_and_fenceable(vma))
		return true;

	size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
	if (vma->node.size < size)
		return false;

	alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
	if (!IS_ALIGNED(vma->node.start, alignment))
		return false;

	return true;
}

/* Make the current GTT allocation valid for the change in tiling. */
static int
i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
			      int tiling_mode, unsigned int stride)
{
	struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
	struct i915_vma *vma, *vn;
	LIST_HEAD(unbind);
	int ret = 0;

	if (tiling_mode == I915_TILING_NONE)
		return 0;

	mutex_lock(&ggtt->vm.mutex);

	spin_lock(&obj->vma.lock);
	for_each_ggtt_vma(vma, obj) {
		GEM_BUG_ON(vma->vm != &ggtt->vm);

		if (i915_vma_fence_prepare(vma, tiling_mode, stride))
			continue;

		list_move(&vma->vm_link, &unbind);
	}
	spin_unlock(&obj->vma.lock);

	list_for_each_entry_safe(vma, vn, &unbind, vm_link) {
		ret = __i915_vma_unbind(vma);
		if (ret) {
			/* Restore the remaining vma on an error */
			list_splice(&unbind, &ggtt->vm.bound_list);
			break;
		}
	}

	mutex_unlock(&ggtt->vm.mutex);

	return ret;
}

int
i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			   unsigned int tiling, unsigned int stride)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_vma *vma;
	int err;

	/* Make sure we don't cross-contaminate obj->tiling_and_stride */
	BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);

	GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride));
	GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE));

	if ((tiling | stride) == obj->tiling_and_stride)
		return 0;

	if (i915_gem_object_is_framebuffer(obj))
		return -EBUSY;

	/* We need to rebind the object if its current allocation
	 * no longer meets the alignment restrictions for its new
	 * tiling mode. Otherwise we can just leave it alone, but
	 * need to ensure that any fence register is updated before
	 * the next fenced (either through the GTT or by the BLT unit
	 * on older GPUs) access.
	 *
	 * After updating the tiling parameters, we then flag whether
	 * we need to update an associated fence register. Note this
	 * has to also include the unfenced register the GPU uses
	 * whilst executing a fenced command for an untiled object.
	 */

	i915_gem_object_lock(obj, NULL);
	if (i915_gem_object_is_framebuffer(obj)) {
		i915_gem_object_unlock(obj);
		return -EBUSY;
	}

	err = i915_gem_object_fence_prepare(obj, tiling, stride);
	if (err) {
		i915_gem_object_unlock(obj);
		return err;
	}

	/* If the memory has unknown (i.e. varying) swizzling, we pin the
	 * pages to prevent them being swapped out and causing corruption
	 * due to the change in swizzling.
	 */
	if (i915_gem_object_has_pages(obj) &&
	    obj->mm.madv == I915_MADV_WILLNEED &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (tiling == I915_TILING_NONE) {
			GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_clear_tiling_quirk(obj);
			i915_gem_object_make_shrinkable(obj);
		}
		if (!i915_gem_object_is_tiled(obj)) {
			GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_make_unshrinkable(obj);
			i915_gem_object_set_tiling_quirk(obj);
		}
	}

	spin_lock(&obj->vma.lock);
	for_each_ggtt_vma(vma, obj) {
		vma->fence_size =
			i915_gem_fence_size(i915, vma->size, tiling, stride);
		vma->fence_alignment =
			i915_gem_fence_alignment(i915,
						 vma->size, tiling, stride);

		if (vma->fence)
			vma->fence->dirty = true;
	}
	spin_unlock(&obj->vma.lock);

	obj->tiling_and_stride = tiling | stride;
	i915_gem_object_unlock(obj);

	/* Force the fence to be reacquired for GTT access */
	i915_gem_object_release_mmap_gtt(obj);

	/* Try to preallocate memory required to save swizzling on put-pages */
	if (i915_gem_object_needs_bit17_swizzle(obj)) {
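		/*
		 * The bitmap records one bit per page: bit 17 of each page's
		 * physical address at the time of swizzling, so accesses can
		 * be fixed up if the backing pages later move (e.g. across
		 * swap-out and swap-in).
		 */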
		if (!obj->bit_17) {
			obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT,
						    GFP_KERNEL);
		}
	} else {
		bitmap_free(obj->bit_17);
		obj->bit_17 = NULL;
	}

	return 0;
}

/**
 * i915_gem_set_tiling_ioctl - IOCTL handler to set tiling mode
 * @dev: DRM device
 * @data: data pointer for the ioctl
 * @file: DRM file for the ioctl call
 *
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int
i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_set_tiling *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	if (!dev_priv->ggtt.num_fences)
		return -EOPNOTSUPP;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * The tiling mode of proxy objects is handled by their generator, and
	 * not allowed to be changed by userspace.
	 */
	if (i915_gem_object_is_proxy(obj)) {
		err = -ENXIO;
		goto err;
	}

	if (!i915_tiling_ok(obj, args->tiling_mode, args->stride)) {
		err = -EINVAL;
		goto err;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_x;
		else
			args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride);

	/* We have to maintain this existing ABI... */
	args->stride = i915_gem_object_get_stride(obj);
	args->tiling_mode = i915_gem_object_get_tiling(obj);

err:
	i915_gem_object_put(obj);
	return err;
}
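
/*
 * Userspace consequence (illustrative): since the kernel may downgrade the
 * request to I915_TILING_NONE when the swizzling is unknown, callers should
 * trust the tiling_mode and stride written back into the args rather than
 * what they asked for, e.g.:
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
 *	if (set_tiling.tiling_mode != I915_TILING_X)
 *		fall_back_to_linear(); // fall_back_to_linear() is hypothetical
 */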

/**
 * i915_gem_get_tiling_ioctl - IOCTL handler to get tiling mode
 * @dev: DRM device
 * @data: data pointer for the ioctl
 * @file: DRM file for the ioctl call
 *
 * Returns the current tiling mode and required bit 6 swizzling for the object.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int
i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_get_tiling *args = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	int err = -ENOENT;

	if (!dev_priv->ggtt.num_fences)
		return -EOPNOTSUPP;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (obj) {
		args->tiling_mode =
			READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
		err = 0;
	}
	rcu_read_unlock();
	if (unlikely(err))
		return err;

	switch (args->tiling_mode) {
	case I915_TILING_X:
		args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_x;
		break;
	case I915_TILING_Y:
		args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_y;
		break;
	default:
	case I915_TILING_NONE:
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		break;
	}

	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
	else
		args->phys_swizzle_mode = args->swizzle_mode;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

	return 0;
}
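
/*
 * Example (illustrative sketch): querying the tiling of an object from
 * userspace; fd and bo_handle are assumptions for illustration.
 *
 *	struct drm_i915_gem_get_tiling get_tiling = { .handle = bo_handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling) == 0)
 *		printf("tiling %u, swizzle %u\n",
 *		       get_tiling.tiling_mode, get_tiling.swizzle_mode);
 */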