/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_vgpu.h"

/**
 * DOC: fence register handling
 *
 * Important to avoid confusion: "fences" in the i915 driver are not execution
 * fences used to track command completion but hardware detiler objects which
 * wrap a given range of the global GTT. Each platform has only a fairly limited
 * set of these objects.
 *
 * Fences are used to detile GTT memory mappings. They're also connected to the
 * hardware frontbuffer render tracking and hence interact with frontbuffer
 * compression. Furthermore on older platforms fences are required for tiled
 * objects used by the display engine. They can also be used by the render
 * engine - they're required for blitter commands and are optional for render
 * commands. But on gen4+ both display (with the exception of fbc) and rendering
 * have their own tiling state bits and don't need fences.
 *
 * Also note that fences only support X and Y tiling and hence can't be used for
 * the fancier new tiling formats like W, Ys and Yf.
 *
 * Finally note that because fences are such a restricted resource they're
 * dynamically associated with objects. Furthermore fence state is committed to
 * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
 * explicitly call i915_gem_object_get_fence() to synchronize fencing status
 * for cpu access. Also note that some code wants an unfenced view; for those
 * cases the fence can be removed forcefully with i915_gem_object_put_fence().
 *
 * Internally these functions will synchronize with userspace access by removing
 * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
 */
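
/*
 * A minimal usage sketch (illustrative only, not a call site from this
 * file): kernel users bracket detiled GTT access with a fence pin. This
 * assumes the vma is already pinned in the mappable aperture and that
 * the caller holds a runtime-pm wakeref, since fences are revoked on
 * runtime suspend (see i915_vma_pin_fence() below):
 *
 *	err = i915_vma_pin_fence(vma);
 *	if (err)
 *		return err;
 *
 *	... CPU access through the fenced GTT mapping ...
 *
 *	i915_vma_unpin_fence(vma);
 *
 * i915_vma_unpin_fence() is safe to call even when no fence was
 * assigned (e.g. for an untiled object).
 */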

#define pipelined 0

static void i965_write_fence_reg(struct i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	i915_reg_t fence_reg_lo, fence_reg_hi;
	int fence_pitch_shift;
	u64 val;

	if (INTEL_GEN(fence->i915) >= 6) {
		fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
		fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
		fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
	} else {
		fence_reg_lo = FENCE_REG_965_LO(fence->id);
		fence_reg_hi = FENCE_REG_965_HI(fence->id);
		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
	}

	val = 0;
	if (vma) {
		unsigned int stride = i915_gem_object_get_stride(vma->obj);

		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
		GEM_BUG_ON(!IS_ALIGNED(stride, 128));

		val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
		val |= vma->node.start;
		val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
		if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
			val |= BIT(I965_FENCE_TILING_Y_SHIFT);
		val |= I965_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct intel_uncore *uncore = &fence->i915->uncore;

		/*
		 * To work around incoherency with non-atomic 64-bit register
		 * updates, we split the 64-bit update into two 32-bit writes.
		 * In order for a partial fence not to be evaluated between
		 * writes, we precede the update with a write to turn off the
		 * fence register, and only enable the fence as the last step.
		 *
		 * For extra levels of paranoia, we make sure each step lands
		 * before applying the next step.
		 */
		intel_uncore_write_fw(uncore, fence_reg_lo, 0);
		intel_uncore_posting_read_fw(uncore, fence_reg_lo);

		intel_uncore_write_fw(uncore, fence_reg_hi, upper_32_bits(val));
		intel_uncore_write_fw(uncore, fence_reg_lo, lower_32_bits(val));
		intel_uncore_posting_read_fw(uncore, fence_reg_lo);
	}
}
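
/*
 * For reference (derived from the code in i965_write_fence_reg() above,
 * not from a datasheet): the 64-bit value packs the last fenced page
 * address in the upper 32 bits and, in the lower 32 bits, the start
 * address together with the pitch-in-128-byte-units field at
 * fence_pitch_shift, the Y-tiling bit and the valid bit.
 */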

static void i915_write_fence_reg(struct i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	u32 val;

	val = 0;
	if (vma) {
		unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
		bool is_y_tiled = tiling == I915_TILING_Y;
		unsigned int stride = i915_gem_object_get_stride(vma->obj);

		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
		GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
		GEM_BUG_ON(!is_power_of_2(vma->fence_size));
		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));

		if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
			stride /= 128;
		else
			stride /= 512;
		GEM_BUG_ON(!is_power_of_2(stride));

		val = vma->node.start;
		if (is_y_tiled)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I915_FENCE_SIZE_BITS(vma->fence_size);
		val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct intel_uncore *uncore = &fence->i915->uncore;
		i915_reg_t reg = FENCE_REG(fence->id);

		intel_uncore_write_fw(uncore, reg, val);
		intel_uncore_posting_read_fw(uncore, reg);
	}
}

static void i830_write_fence_reg(struct i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	u32 val;

	val = 0;
	if (vma) {
		unsigned int stride = i915_gem_object_get_stride(vma->obj);

		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
		GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
		GEM_BUG_ON(!is_power_of_2(vma->fence_size));
		GEM_BUG_ON(!is_power_of_2(stride / 128));
		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));

		val = vma->node.start;
		if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I830_FENCE_SIZE_BITS(vma->fence_size);
		val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct intel_uncore *uncore = &fence->i915->uncore;
		i915_reg_t reg = FENCE_REG(fence->id);

		intel_uncore_write_fw(uncore, reg, val);
		intel_uncore_posting_read_fw(uncore, reg);
	}
}

static void fence_write(struct i915_fence_reg *fence,
			struct i915_vma *vma)
{
	/*
	 * Previous access through the fence register is marshalled by
	 * the mb() inside the fault handlers (i915_gem_release_mmaps)
	 * and explicitly managed for internal users.
	 */

	if (IS_GEN(fence->i915, 2))
		i830_write_fence_reg(fence, vma);
	else if (IS_GEN(fence->i915, 3))
		i915_write_fence_reg(fence, vma);
	else
		i965_write_fence_reg(fence, vma);

	/*
	 * Access through the fenced region afterwards is
	 * ordered by the posting reads whilst writing the registers.
	 */

	fence->dirty = false;
}

static int fence_update(struct i915_fence_reg *fence,
			struct i915_vma *vma)
{
	intel_wakeref_t wakeref;
	struct i915_vma *old;
	int ret;

	if (vma) {
		if (!i915_vma_is_map_and_fenceable(vma))
			return -EINVAL;

		if (WARN(!i915_gem_object_get_stride(vma->obj) ||
			 !i915_gem_object_get_tiling(vma->obj),
			 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
			 i915_gem_object_get_stride(vma->obj),
			 i915_gem_object_get_tiling(vma->obj)))
			return -EINVAL;

		ret = i915_active_wait(&vma->active);
		if (ret)
			return ret;
	}

	old = xchg(&fence->vma, NULL);
	if (old) {
		ret = i915_active_wait(&old->active);
		if (ret) {
			fence->vma = old;
			return ret;
		}

		i915_vma_flush_writes(old);

		/*
		 * Ensure that all userspace CPU access is completed before
		 * stealing the fence.
		 */
		if (old != vma) {
			GEM_BUG_ON(old->fence != fence);
			i915_vma_revoke_mmap(old);
			old->fence = NULL;
		}

		list_move(&fence->link, &fence->i915->ggtt.fence_list);
	}

	/*
	 * We only need to update the register itself if the device is awake.
	 * If the device is currently powered down, we will defer the write
	 * to the runtime resume, see i915_gem_restore_fences().
	 *
	 * This only works for removing the fence register; on acquisition
	 * the caller must hold the rpm wakeref. The fence register must
	 * be cleared before we can use any other fences to ensure that
	 * the new fences do not overlap the elided clears, confusing HW.
	 */
	wakeref = intel_runtime_pm_get_if_in_use(&fence->i915->runtime_pm);
	if (!wakeref) {
		GEM_BUG_ON(vma);
		return 0;
	}

	WRITE_ONCE(fence->vma, vma);
	fence_write(fence, vma);

	if (vma) {
		vma->fence = fence;
		list_move_tail(&fence->link, &fence->i915->ggtt.fence_list);
	}

	intel_runtime_pm_put(&fence->i915->runtime_pm, wakeref);
	return 0;
}

/**
 * i915_vma_revoke_fence - force-remove fence for a VMA
 * @vma: vma to map linearly (not through a fence reg)
 *
 * This function force-removes any fence from the given object, which is useful
 * if the kernel wants to do untiled GTT access.
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int i915_vma_revoke_fence(struct i915_vma *vma)
{
	struct i915_fence_reg *fence = vma->fence;

	lockdep_assert_held(&vma->vm->mutex);
	if (!fence)
		return 0;

	if (atomic_read(&fence->pin_count))
		return -EBUSY;

	return fence_update(fence, NULL);
}

static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
{
	struct i915_fence_reg *fence;

	list_for_each_entry(fence, &i915->ggtt.fence_list, link) {
		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);

		if (atomic_read(&fence->pin_count))
			continue;

		return fence;
	}

	/* Wait for completion of pending flips which consume fences */
	if (intel_has_pending_fb_unpin(i915))
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-EDEADLK);
}

static int __i915_vma_pin_fence(struct i915_vma *vma)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
	struct i915_fence_reg *fence;
	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
	int err;

	/* Just update our place in the LRU if our fence is getting reused. */
	if (vma->fence) {
		fence = vma->fence;
		GEM_BUG_ON(fence->vma != vma);
		atomic_inc(&fence->pin_count);
		if (!fence->dirty) {
			list_move_tail(&fence->link, &ggtt->fence_list);
			return 0;
		}
	} else if (set) {
		fence = fence_find(vma->vm->i915);
		if (IS_ERR(fence))
			return PTR_ERR(fence);

		GEM_BUG_ON(atomic_read(&fence->pin_count));
		atomic_inc(&fence->pin_count);
	} else {
		return 0;
	}

	err = fence_update(fence, set);
	if (err)
		goto out_unpin;

	GEM_BUG_ON(fence->vma != set);
	GEM_BUG_ON(vma->fence != (set ? fence : NULL));

	if (set)
		return 0;

out_unpin:
	atomic_dec(&fence->pin_count);
	return err;
}

/**
 * i915_vma_pin_fence - set up fencing for a vma
 * @vma: vma to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @vma,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 *
 * For an untiled surface, this removes any existing fence.
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int i915_vma_pin_fence(struct i915_vma *vma)
{
	int err;

	/*
	 * Note that we revoke fences on runtime suspend. Therefore the user
	 * must keep the device awake whilst using the fence.
	 */
	assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	err = mutex_lock_interruptible(&vma->vm->mutex);
	if (err)
		return err;

	err = __i915_vma_pin_fence(vma);
	mutex_unlock(&vma->vm->mutex);

	return err;
}

/**
 * i915_reserve_fence - Reserve a fence for vGPU
 * @i915: i915 device private
 *
 * This function walks the fence regs looking for a free one and removes
 * it from the fence_list. It is used to reserve a fence for vGPU use.
 */
struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct i915_fence_reg *fence;
	int count;
	int ret;

	lockdep_assert_held(&ggtt->vm.mutex);

	/* Keep at least one fence available for the display engine. */
	count = 0;
	list_for_each_entry(fence, &ggtt->fence_list, link)
		count += !atomic_read(&fence->pin_count);
	if (count <= 1)
		return ERR_PTR(-ENOSPC);

	fence = fence_find(i915);
	if (IS_ERR(fence))
		return fence;

	if (fence->vma) {
		/* Force-remove fence from VMA */
		ret = fence_update(fence, NULL);
		if (ret)
			return ERR_PTR(ret);
	}

	list_del(&fence->link);

	return fence;
}

/**
 * i915_unreserve_fence - Reclaim a reserved fence
 * @fence: the fence reg
 *
 * This function adds a fence register previously reserved for vGPU back
 * to the fence_list.
 */
void i915_unreserve_fence(struct i915_fence_reg *fence)
{
	struct i915_ggtt *ggtt = &fence->i915->ggtt;

	lockdep_assert_held(&ggtt->vm.mutex);

	list_add(&fence->link, &ggtt->fence_list);
}

/**
 * i915_gem_restore_fences - restore fence state
 * @i915: i915 device private
 *
 * Restore the hw fence state to match the software tracking again, to be called
 * after a gpu reset and on resume. Note that on runtime suspend we only cancel
 * the fences, to be reacquired by the user later.
 */
void i915_gem_restore_fences(struct drm_i915_private *i915)
{
	int i;

	rcu_read_lock(); /* keep obj alive as we dereference */
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
		struct i915_vma *vma = READ_ONCE(reg->vma);

		GEM_BUG_ON(vma && vma->fence != reg);

		/*
		 * Commit delayed tiling changes if we have an object still
		 * attached to the fence, otherwise just clear the fence.
		 */
		if (vma && !i915_gem_object_is_tiled(vma->obj))
			vma = NULL;

		fence_write(reg, vma);
	}
	rcu_read_unlock();
}

/**
 * DOC: tiling swizzling details
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * Performance improvements from doing this on the back/depth buffer are on
 * the order of 30%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMS) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled.  However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y.  So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics.  This
 * is called "Channel XOR Randomization" in the MCH documentation.  The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decode.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what address
 * swizzling it needs to do, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bits 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bits 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */
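
/*
 * A worked sketch, assuming swizzle mode I915_BIT_6_SWIZZLE_9_10 (bits
 * 9 and 10 XORed into bit 6): for a byte offset into the object, the
 * CPU-side fixup is
 *
 *	swizzled = offset ^ (((offset >> 3) ^ (offset >> 4)) & 64);
 *
 * where offset >> 3 moves bit 9 down to bit 6 and offset >> 4 moves
 * bit 10 down to bit 6, so both are folded into bit 6 of the address
 * actually used for the CPU access.
 */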

/**
 * detect_bit_6_swizzle - detect bit 6 swizzling pattern
 * @i915: i915 device private
 *
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
static void detect_bit_6_swizzle(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (INTEL_GEN(i915) >= 8 || IS_VALLEYVIEW(i915)) {
		/*
		 * On BDW+, swizzling is not used. We leave the CPU memory
		 * controller in charge of optimizing memory accesses without
		 * the extra address manipulation GPU side.
		 *
		 * VLV and CHV don't have GPU swizzling.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (INTEL_GEN(i915) >= 6) {
		if (i915->preserve_bios_swizzle) {
			if (intel_uncore_read(uncore, DISP_ARB_CTL) &
			    DISP_TILE_SURFACE_SWIZZLING) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		} else {
			u32 dimm_c0, dimm_c1;

			dimm_c0 = intel_uncore_read(uncore, MAD_DIMM_C0);
			dimm_c1 = intel_uncore_read(uncore, MAD_DIMM_C1);
			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			/*
			 * Enable swizzling when the channels are populated
			 * with identically sized dimms. We don't need to check
			 * the 3rd channel because no cpu with gpu attached
			 * ships in that configuration. Also, swizzling only
			 * makes sense for 2 channels anyway.
			 */
			if (dimm_c0 == dimm_c1) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		}
	} else if (IS_GEN(i915, 5)) {
		/*
		 * On Ironlake, whatever the DRAM config, the GPU always does
		 * the same swizzling setup.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (IS_GEN(i915, 2)) {
		/*
		 * As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_G45(i915) || IS_I965G(i915) || IS_G33(i915)) {
		/*
		 * The 965, G33, and newer, have a very flexible memory
		 * configuration.  It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *    slot fill         memory size  swizzling
		 * 0A   0B   1A   1B    1-ch   2-ch
		 * 512  0    0    0     512    0     O
		 * 512  0    512  0     16     1008  X
		 * 512  0    0    512   16     1008  X
		 * 0    512  0    512   16     1008  X
		 * 1024 1024 1024 0     2048   1024  O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 *
		 * Reports indicate that the swizzling actually
		 * varies depending upon page placement inside the
		 * channels, i.e. we see swizzled pages where the
		 * banks of memory are paired and unswizzled on the
		 * uneven portion, so leave that as unknown.
		 */
		if (intel_uncore_read(uncore, C0DRB3) ==
		    intel_uncore_read(uncore, C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	} else {
		u32 dcc = intel_uncore_read(uncore, DCC);

		/*
		 * On 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC.  For single-channel, neither the CPU
		 * nor the GPU do swizzling.  For dual channel interleaved,
		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
		 * 9 for Y tiled.  The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/*
				 * This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}

		/* check for L-shaped memory aka modified enhanced addressing */
		if (IS_GEN(i915, 4) &&
		    !(intel_uncore_read(uncore, DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}

		if (dcc == 0xffffffff) {
			DRM_ERROR("Couldn't read from MCHBAR.  "
				  "Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	}

	if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
	    swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
		/*
		 * Userspace likes to explode if it sees unknown swizzling,
		 * so lie. We will finish the lie when reporting through
		 * the get-tiling-ioctl by reporting the physical swizzle
		 * mode as unknown instead.
		 *
		 * As we don't strictly know what the swizzling is, it may be
		 * bit17 dependent, and so we need to also prevent the pages
		 * from being moved.
		 */
		i915->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	}

	i915->mm.bit_6_swizzle_x = swizzle_x;
	i915->mm.bit_6_swizzle_y = swizzle_y;
}

/*
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void i915_gem_swizzle_page(struct page *page)
{
	char temp[64];
	char *vaddr;
	int i;

	vaddr = kmap(page);

	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}

	kunmap(page);
}

/**
 * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function fixes up the swizzling in case any page frame number for this
 * object has changed in bit 17 since that state has been saved with
 * i915_gem_object_save_bit_17_swizzle().
 *
 * This is called when pinning backing storage again, since the kernel is free
 * to move unpinned backing storage around (either by directly moving pages or
 * by swapping them out and back in again).
 */
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
				  struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL)
		return;

	i = 0;
	for_each_sgt_page(page, sgt_iter, pages) {
		char new_bit_17 = page_to_phys(page) >> 17;

		if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
			i915_gem_swizzle_page(page);
			set_page_dirty(page);
		}
		i++;
	}
}

/**
 * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function saves the bit 17 of each page frame number so that swizzling
 * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
 * be called before the backing storage can be unpinned.
 */
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
				    struct sg_table *pages)
{
	const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL) {
		obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
		if (obj->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 "
				  "record\n");
			return;
		}
	}

	i = 0;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (page_to_phys(page) & (1 << 17))
			__set_bit(i, obj->bit_17);
		else
			__clear_bit(i, obj->bit_17);
		i++;
	}
}

void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	int num_fences;
	int i;

	INIT_LIST_HEAD(&ggtt->fence_list);
	INIT_LIST_HEAD(&ggtt->userfault_list);
	intel_wakeref_auto_init(&ggtt->userfault_wakeref, &i915->runtime_pm);

	detect_bit_6_swizzle(i915);

	if (INTEL_GEN(i915) >= 7 &&
	    !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
		num_fences = 32;
	else if (INTEL_GEN(i915) >= 4 ||
		 IS_I945G(i915) || IS_I945GM(i915) ||
		 IS_G33(i915) || IS_PINEVIEW(i915))
		num_fences = 16;
	else
		num_fences = 8;

	if (intel_vgpu_active(i915))
		num_fences = intel_uncore_read(&i915->uncore,
					       vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	for (i = 0; i < num_fences; i++) {
		struct i915_fence_reg *fence = &ggtt->fence_regs[i];

		fence->i915 = i915;
		fence->id = i;
		list_add_tail(&fence->link, &ggtt->fence_list);
	}
	ggtt->num_fences = num_fences;

	i915_gem_restore_fences(i915);
}

void intel_gt_init_swizzling(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;

	if (INTEL_GEN(i915) < 5 ||
	    i915->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN(i915, 5))
		return;

	intel_uncore_rmw(uncore, TILECTL, 0, TILECTL_SWZCTL);

	if (IS_GEN(i915, 6))
		intel_uncore_write(uncore,
				   ARB_MODE,
				   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN(i915, 7))
		intel_uncore_write(uncore,
				   ARB_MODE,
				   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN(i915, 8))
		intel_uncore_write(uncore,
				   GAMTARBMODE,
				   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		MISSING_CASE(INTEL_GEN(i915));
}