/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008 Intel Corporation
 */

#include <linux/string.h>
#include <linux/bitops.h>

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"

/**
 * DOC: buffer object tiling
 *
 * i915_gem_set_tiling_ioctl() and i915_gem_get_tiling_ioctl() are the
 * userspace interface to declare fence register requirements.
 *
 * In principle GEM doesn't care at all about the internal data layout of an
 * object, and hence it also doesn't care about tiling or swizzling. There are
 * two exceptions:
 *
 * - For X and Y tiling the hardware provides detilers for CPU access, so-called
 *   fences. Since there's only a limited number of them the kernel must manage
 *   these, and therefore userspace must tell the kernel the object tiling if it
 *   wants to use fences for detiling.
 * - Gen3 and gen4 platforms have a swizzling pattern for tiled objects which
 *   depends upon the physical page frame number. When swapping such objects the
 *   page frame number might change and the kernel must be able to fix this up,
 *   and hence must know the tiling. Note that on a subset of platforms with
 *   asymmetric memory channel population the swizzling pattern changes in an
 *   unknown way, and for those the kernel simply forbids swapping completely.
 *
 * Since neither of these applies to new tiling layouts on modern platforms,
 * like W, Ys and Yf tiling, GEM only allows object tiling to be set to X or Y
 * tiled. Anything else can be handled in userspace entirely without the
 * kernel's involvement.
 */

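/*
 * Illustrative userspace sketch (an assumption for documentation purposes,
 * not part of this file): declaring X tiling on a buffer with the uapi
 * structures from include/uapi/drm/i915_drm.h, and reading back the bit 6
 * swizzle the kernel selected:
 *
 *	struct drm_i915_gem_set_tiling set = {
 *		.handle = handle,		// GEM handle of the buffer
 *		.tiling_mode = I915_TILING_X,
 *		.stride = stride,		// bytes per tile row
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set) == 0)
 *		swizzle = set.swizzle_mode;	// apply on CPU access
 */
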
/**
 * i915_gem_fence_size - required global GTT size for a fence
 * @i915: i915 device
 * @size: object size
 * @tiling: tiling mode
 * @stride: tiling stride
 *
 * Return the required global GTT size for a fence (view of a tiled object),
 * taking into account potential fence register mapping.
 */
u32 i915_gem_fence_size(struct drm_i915_private *i915,
			u32 size, unsigned int tiling, unsigned int stride)
{
	u32 ggtt_size;

	GEM_BUG_ON(!size);

	if (tiling == I915_TILING_NONE)
		return size;

	GEM_BUG_ON(!stride);

	if (INTEL_GEN(i915) >= 4) {
		stride *= i915_gem_tile_height(tiling);
		GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE));
		return roundup(size, stride);
	}

	/* Previous chips need a power-of-two fence region when tiling */
	if (IS_GEN(i915, 3))
		ggtt_size = 1024*1024;
	else
		ggtt_size = 512*1024;

	while (ggtt_size < size)
		ggtt_size <<= 1;

	return ggtt_size;
}

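/*
 * Worked example (illustrative, not from the original file): a 700KiB
 * X-tiled object on gen3 starts from the 1MiB minimum, which already
 * covers it, so the fence region is 1MiB. On gen4+ with a 4096 byte
 * X-tiled stride a tile row spans 4096 * 8 = 32KiB, and the fence size
 * is simply the object size rounded up to that.
 */
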
/**
 * i915_gem_fence_alignment - required global GTT alignment for a fence
 * @i915: i915 device
 * @size: object size
 * @tiling: tiling mode
 * @stride: tiling stride
 *
 * Return the required global GTT alignment for a fence (a view of a tiled
 * object), taking into account potential fence register mapping.
 */
u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size,
			     unsigned int tiling, unsigned int stride)
{
	GEM_BUG_ON(!size);

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (tiling == I915_TILING_NONE)
		return I915_GTT_MIN_ALIGNMENT;

	if (INTEL_GEN(i915) >= 4)
		return I965_FENCE_PAGE;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_fence_size(i915, size, tiling, stride);
}

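/*
 * Worked example (illustrative): for the 700KiB X-tiled gen3 object above,
 * the alignment equals its 1MiB fence size, whereas on gen4+ any
 * I965_FENCE_PAGE (4KiB) boundary will do.
 */
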
/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_i915_gem_object *obj,
	       unsigned int tiling, unsigned int stride)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int tile_width;

	/* Linear is always fine */
	if (tiling == I915_TILING_NONE)
		return true;

	if (tiling > I915_TILING_LAST)
		return false;

	/*
	 * Check maximum stride & object size. i965+ stores the end address
	 * of the GTT mapping in the fence reg, so don't bother to check
	 * the size.
	 */
	if (INTEL_GEN(i915) >= 7) {
		if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
			return false;
	} else if (INTEL_GEN(i915) >= 4) {
		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
			return false;
	} else {
		if (stride > 8192)
			return false;

		if (!is_power_of_2(stride))
			return false;
	}

	if (IS_GEN(i915, 2) ||
	    (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
		tile_width = 128;
	else
		tile_width = 512;

	if (!stride || !IS_ALIGNED(stride, tile_width))
		return false;

	return true;
}

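/*
 * Example (illustrative): pre-gen4 an X-tiled stride of 384 fails twice
 * over, being neither a power of two nor a multiple of the 512 byte tile
 * width. On gen4+ the power-of-two rule is dropped, so any tile-width
 * multiple below the fence pitch limit is acceptable, e.g. 1536 (3 * 512).
 */
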
static bool i915_vma_fence_prepare(struct i915_vma *vma,
				   int tiling_mode, unsigned int stride)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	u32 size, alignment;

	if (!i915_vma_is_map_and_fenceable(vma))
		return true;

	size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
	if (vma->node.size < size)
		return false;

	alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
	if (!IS_ALIGNED(vma->node.start, alignment))
		return false;

	return true;
}

/* Make the current GTT allocation valid for the change in tiling. */
static int
i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
			      int tiling_mode, unsigned int stride)
{
	struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
	struct i915_vma *vma, *vn;
	LIST_HEAD(unbind);
	int ret = 0;

	if (tiling_mode == I915_TILING_NONE)
		return 0;

	mutex_lock(&ggtt->vm.mutex);

	spin_lock(&obj->vma.lock);
	for_each_ggtt_vma(vma, obj) {
		GEM_BUG_ON(vma->vm != &ggtt->vm);

		if (i915_vma_fence_prepare(vma, tiling_mode, stride))
			continue;

		list_move(&vma->vm_link, &unbind);
	}
	spin_unlock(&obj->vma.lock);

	list_for_each_entry_safe(vma, vn, &unbind, vm_link) {
		ret = __i915_vma_unbind(vma);
		if (ret) {
			/* Restore the remaining vmas on an error */
			list_splice(&unbind, &ggtt->vm.bound_list);
			break;
		}
	}

	mutex_unlock(&ggtt->vm.mutex);

	return ret;
}

int
i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			   unsigned int tiling, unsigned int stride)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_vma *vma;
	int err;

	/* Make sure we don't cross-contaminate obj->tiling_and_stride */
	BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);

	GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride));
	GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE));

	if ((tiling | stride) == obj->tiling_and_stride)
		return 0;

	if (i915_gem_object_is_framebuffer(obj))
		return -EBUSY;

	/*
	 * We need to rebind the object if its current allocation no longer
	 * meets the alignment restrictions for its new tiling mode.
	 * Otherwise we can just leave it alone, but need to ensure that any
	 * fence register is updated before the next fenced access (either
	 * through the GTT or by the BLT unit on older GPUs).
	 *
	 * After updating the tiling parameters, we then flag whether we need
	 * to update an associated fence register. Note this has to also
	 * include the unfenced register the GPU uses whilst executing a
	 * fenced command for an untiled object.
	 */

	i915_gem_object_lock(obj);
	if (i915_gem_object_is_framebuffer(obj)) {
		i915_gem_object_unlock(obj);
		return -EBUSY;
	}

	err = i915_gem_object_fence_prepare(obj, tiling, stride);
	if (err) {
		i915_gem_object_unlock(obj);
		return err;
	}

	/*
	 * If the memory has unknown (i.e. varying) swizzling, we pin the
	 * pages to prevent them being swapped out and causing corruption
	 * due to the change in swizzling.
	 */
	mutex_lock(&obj->mm.lock);
	if (i915_gem_object_has_pages(obj) &&
	    obj->mm.madv == I915_MADV_WILLNEED &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (tiling == I915_TILING_NONE) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
			obj->mm.quirked = false;
		}
		if (!i915_gem_object_is_tiled(obj)) {
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
			obj->mm.quirked = true;
		}
	}
	mutex_unlock(&obj->mm.lock);

	spin_lock(&obj->vma.lock);
	for_each_ggtt_vma(vma, obj) {
		vma->fence_size =
			i915_gem_fence_size(i915, vma->size, tiling, stride);
		vma->fence_alignment =
			i915_gem_fence_alignment(i915,
						 vma->size, tiling, stride);

		if (vma->fence)
			vma->fence->dirty = true;
	}
	spin_unlock(&obj->vma.lock);

	obj->tiling_and_stride = tiling | stride;
	i915_gem_object_unlock(obj);

	/* Force the fence to be reacquired for GTT access */
	i915_gem_object_release_mmap_gtt(obj);

	/* Try to preallocate memory required to save swizzling on put-pages */
	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		if (!obj->bit_17) {
			obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT,
						    GFP_KERNEL);
		}
	} else {
		bitmap_free(obj->bit_17);
		obj->bit_17 = NULL;
	}

	return 0;
}

/**
 * i915_gem_set_tiling_ioctl - IOCTL handler to set tiling mode
 * @dev: DRM device
 * @data: data pointer for the ioctl
 * @file: DRM file for the ioctl call
 *
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int
i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_set_tiling *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	if (!dev_priv->ggtt.num_fences)
		return -EOPNOTSUPP;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * The tiling mode of proxy objects is handled by their generator
	 * and is not allowed to be changed by userspace.
	 */
	if (i915_gem_object_is_proxy(obj)) {
		err = -ENXIO;
		goto err;
	}

	if (!i915_tiling_ok(obj, args->tiling_mode, args->stride)) {
		err = -EINVAL;
		goto err;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_x;
		else
			args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_y;

		/*
		 * Hide bit 17 swizzling from the user. This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride);

	/* We have to maintain this existing ABI... */
	args->stride = i915_gem_object_get_stride(obj);
	args->tiling_mode = i915_gem_object_get_tiling(obj);

err:
	i915_gem_object_put(obj);
	return err;
}

/**
 * i915_gem_get_tiling_ioctl - IOCTL handler to get tiling mode
 * @dev: DRM device
 * @data: data pointer for the ioctl
 * @file: DRM file for the ioctl call
 *
 * Returns the current tiling mode and required bit 6 swizzling for the object.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int
i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_get_tiling *args = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	int err = -ENOENT;

	if (!dev_priv->ggtt.num_fences)
		return -EOPNOTSUPP;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (obj) {
		args->tiling_mode =
			READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
		err = 0;
	}
	rcu_read_unlock();
	if (unlikely(err))
		return err;

	switch (args->tiling_mode) {
	case I915_TILING_X:
		args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_x;
		break;
	case I915_TILING_Y:
		args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_y;
		break;
	default:
	case I915_TILING_NONE:
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		break;
	}

	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
	else
		args->phys_swizzle_mode = args->swizzle_mode;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

	return 0;
}
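
/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * querying the tiling and swizzling previously set on a buffer, using the
 * uapi struct from include/uapi/drm/i915_drm.h:
 *
 *	struct drm_i915_gem_get_tiling get = { .handle = handle };
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get) == 0)
 *		printf("tiling %u, swizzle %u\n",
 *		       get.tiling_mode, get.swizzle_mode);
 */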