   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2020 Intel Corporation
   4 */
   5
   6#include <linux/stop_machine.h>
   7
   8#include <asm/set_memory.h>
   9#include <asm/smp.h>
  10
  11#include <drm/i915_drm.h>
  12
  13#include "intel_gt.h"
  14#include "i915_drv.h"
  15#include "i915_scatterlist.h"
  16#include "i915_vgpu.h"
  17
  18#include "intel_gtt.h"
  19
  20static int
  21i915_get_ggtt_vma_pages(struct i915_vma *vma);
  22
  23static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
  24				   unsigned long color,
  25				   u64 *start,
  26				   u64 *end)
  27{
  28	if (i915_node_color_differs(node, color))
  29		*start += I915_GTT_PAGE_SIZE;
  30
  31	/*
  32	 * Also leave a space between the unallocated reserved node after the
  33	 * GTT and any objects within the GTT, i.e. we use the color adjustment
  34	 * to insert a guard page to prevent prefetches crossing over the
  35	 * GTT boundary.
  36	 */
  37	node = list_next_entry(node, node_list);
  38	if (node->color != color)
  39		*end -= I915_GTT_PAGE_SIZE;
  40}
  41
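     /*
      * One-time GGTT setup: initialise the address space, hook up the
      * guard-page colouring where required, map the mappable aperture
      * for WC access (plus MTRR), and initialise the fence registers.
      */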
  42static int ggtt_init_hw(struct i915_ggtt *ggtt)
  43{
  44	struct drm_i915_private *i915 = ggtt->vm.i915;
  45
  46	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
  47
  48	ggtt->vm.is_ggtt = true;
  49
  50	/* Only VLV supports read-only GGTT mappings */
  51	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
  52
  53	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
  54		ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
  55
  56	if (ggtt->mappable_end) {
  57		if (!io_mapping_init_wc(&ggtt->iomap,
  58					ggtt->gmadr.start,
  59					ggtt->mappable_end)) {
  60			ggtt->vm.cleanup(&ggtt->vm);
  61			return -EIO;
  62		}
  63
  64		ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
  65					      ggtt->mappable_end);
  66	}
  67
  68	intel_ggtt_init_fences(ggtt);
  69
  70	return 0;
  71}
  72
  73/**
  74 * i915_ggtt_init_hw - Initialize GGTT hardware
  75 * @i915: i915 device
  76 */
  77int i915_ggtt_init_hw(struct drm_i915_private *i915)
  78{
  79	int ret;
  80
  81	stash_init(&i915->mm.wc_stash);
  82
  83	/*
  84	 * Note that we use page colouring to enforce a guard page at the
  85	 * end of the address space. This is required as the CS may prefetch
  86	 * beyond the end of the batch buffer, across the page boundary,
  87	 * and beyond the end of the GTT if we do not provide a guard.
  88	 */
  89	ret = ggtt_init_hw(&i915->ggtt);
  90	if (ret)
  91		return ret;
  92
  93	return 0;
  94}
  95
  96/*
   97 * Certain Gen5 chipsets require idling the GPU before
  98 * unmapping anything from the GTT when VT-d is enabled.
  99 */
 100static bool needs_idle_maps(struct drm_i915_private *i915)
 101{
 102	/*
 103	 * Query intel_iommu to see if we need the workaround. Presumably that
 104	 * was loaded first.
 105	 */
 106	return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
 107}
 108
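     /*
      * Quiesce the GGTT ahead of suspend: wait for in-flight binds,
      * drop the nodes of vmas that are no longer globally bound, and
      * point every PTE back at scratch.
      */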
 109void i915_ggtt_suspend(struct i915_ggtt *ggtt)
 110{
 111	struct i915_vma *vma, *vn;
 112	int open;
 113
 114	mutex_lock(&ggtt->vm.mutex);
 115
 116	/* Skip rewriting PTE on VMA unbind. */
 117	open = atomic_xchg(&ggtt->vm.open, 0);
 118
 119	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
 120		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 121		i915_vma_wait_for_bind(vma);
 122
 123		if (i915_vma_is_pinned(vma))
 124			continue;
 125
 126		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
 127			__i915_vma_evict(vma);
 128			drm_mm_remove_node(&vma->node);
 129		}
 130	}
 131
 132	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
 133	ggtt->invalidate(ggtt);
 134	atomic_set(&ggtt->vm.open, open);
 135
 136	mutex_unlock(&ggtt->vm.mutex);
 137
 138	intel_gt_check_and_clear_faults(ggtt->vm.gt);
 139}
 140
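     /*
      * Write GFX_FLSH_CNTL to kick a GGTT invalidation; the posting
      * read under the uncore lock ensures the write has landed.
      */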
 141void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
 142{
 143	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
 144
 145	spin_lock_irq(&uncore->lock);
 146	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 147	intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
 148	spin_unlock_irq(&uncore->lock);
 149}
 150
 151static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
 152{
 153	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
 154
 155	/*
 156	 * Note that as an uncached mmio write, this will flush the
 157	 * WCB of the writes into the GGTT before it triggers the invalidate.
 158	 */
 159	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 160}
 161
 162static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
 163{
 164	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
 165	struct drm_i915_private *i915 = ggtt->vm.i915;
 166
 167	gen8_ggtt_invalidate(ggtt);
 168
 169	if (INTEL_GEN(i915) >= 12)
 170		intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
 171				      GEN12_GUC_TLB_INV_CR_INVALIDATE);
 172	else
 173		intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 174}
 175
 176static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
 177{
 178	intel_gtt_chipset_flush();
 179}
 180
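     /*
      * Gen8+ GGTT PTEs only carry the DMA address and the valid bit;
      * the cache level and flags are not encoded here.
      */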
 181static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
 182				enum i915_cache_level level,
 183				u32 flags)
 184{
 185	return addr | _PAGE_PRESENT;
 186}
 187
 188static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
 189{
 190	writeq(pte, addr);
 191}
 192
 193static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 194				  dma_addr_t addr,
 195				  u64 offset,
 196				  enum i915_cache_level level,
 197				  u32 unused)
 198{
 199	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 200	gen8_pte_t __iomem *pte =
 201		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
 202
 203	gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, 0));
 204
 205	ggtt->invalidate(ggtt);
 206}
 207
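     /*
      * Write a PTE for every page backing the vma, pad the rest of the
      * node with the scratch page, then invalidate so the GPU sees the
      * new mappings.
      */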
 208static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 209				     struct i915_vma *vma,
 210				     enum i915_cache_level level,
 211				     u32 flags)
 212{
 213	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
 214	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 215	gen8_pte_t __iomem *gte;
 216	gen8_pte_t __iomem *end;
 217	struct sgt_iter iter;
 218	dma_addr_t addr;
 219
 220	/*
 221	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
 222	 * not to allow the user to override access to a read only page.
 223	 */
 224
 225	gte = (gen8_pte_t __iomem *)ggtt->gsm;
 226	gte += vma->node.start / I915_GTT_PAGE_SIZE;
 227	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
 228
 229	for_each_sgt_daddr(addr, iter, vma->pages)
 230		gen8_set_pte(gte++, pte_encode | addr);
 231	GEM_BUG_ON(gte > end);
 232
 233	/* Fill the allocated but "unused" space beyond the end of the buffer */
 234	while (gte < end)
 235		gen8_set_pte(gte++, vm->scratch[0].encode);
 236
 237	/*
 238	 * We want to flush the TLBs only after we're certain all the PTE
 239	 * updates have finished.
 240	 */
 241	ggtt->invalidate(ggtt);
 242}
 243
 244static void gen6_ggtt_insert_page(struct i915_address_space *vm,
 245				  dma_addr_t addr,
 246				  u64 offset,
 247				  enum i915_cache_level level,
 248				  u32 flags)
 249{
 250	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 251	gen6_pte_t __iomem *pte =
 252		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
 253
 254	iowrite32(vm->pte_encode(addr, level, flags), pte);
 255
 256	ggtt->invalidate(ggtt);
 257}
 258
 259/*
 260 * Binds an object into the global gtt with the specified cache level.
 261 * The object will be accessible to the GPU via commands whose operands
 262 * reference offsets within the global GTT as well as accessible by the GPU
 263 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
 264 */
 265static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 266				     struct i915_vma *vma,
 267				     enum i915_cache_level level,
 268				     u32 flags)
 269{
 270	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 271	gen6_pte_t __iomem *gte;
 272	gen6_pte_t __iomem *end;
 273	struct sgt_iter iter;
 274	dma_addr_t addr;
 275
 276	gte = (gen6_pte_t __iomem *)ggtt->gsm;
 277	gte += vma->node.start / I915_GTT_PAGE_SIZE;
 278	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
 279
 280	for_each_sgt_daddr(addr, iter, vma->pages)
 281		iowrite32(vm->pte_encode(addr, level, flags), gte++);
 282	GEM_BUG_ON(gte > end);
 283
 284	/* Fill the allocated but "unused" space beyond the end of the buffer */
 285	while (gte < end)
 286		iowrite32(vm->scratch[0].encode, gte++);
 287
 288	/*
 289	 * We want to flush the TLBs only after we're certain all the PTE
 290	 * updates have finished.
 291	 */
 292	ggtt->invalidate(ggtt);
 293}
 294
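     /* No-op clear_range for configurations where PTEs need not be scrubbed on unbind. */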
 295static void nop_clear_range(struct i915_address_space *vm,
 296			    u64 start, u64 length)
 297{
 298}
 299
 300static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 301				  u64 start, u64 length)
 302{
 303	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 304	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
 305	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
 306	const gen8_pte_t scratch_pte = vm->scratch[0].encode;
 307	gen8_pte_t __iomem *gtt_base =
 308		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
 309	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
 310	int i;
 311
 312	if (WARN(num_entries > max_entries,
 313		 "First entry = %d; Num entries = %d (max=%d)\n",
 314		 first_entry, num_entries, max_entries))
 315		num_entries = max_entries;
 316
 317	for (i = 0; i < num_entries; i++)
 318		gen8_set_pte(&gtt_base[i], scratch_pte);
 319}
 320
 321static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
 322{
 323	/*
 324	 * Make sure the internal GAM fifo has been cleared of all GTT
 325	 * writes before exiting stop_machine(). This guarantees that
 326	 * any aperture accesses waiting to start in another process
 327	 * cannot back up behind the GTT writes causing a hang.
 328	 * The register can be any arbitrary GAM register.
 329	 */
 330	intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
 331}
 332
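     /*
      * Arguments marshalled through stop_machine() for the BXT VT-d
      * workaround: GGTT updates must not run concurrently with
      * aperture accesses.
      */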
 333struct insert_page {
 334	struct i915_address_space *vm;
 335	dma_addr_t addr;
 336	u64 offset;
 337	enum i915_cache_level level;
 338};
 339
 340static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
 341{
 342	struct insert_page *arg = _arg;
 343
 344	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
 345	bxt_vtd_ggtt_wa(arg->vm);
 346
 347	return 0;
 348}
 349
 350static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
 351					  dma_addr_t addr,
 352					  u64 offset,
 353					  enum i915_cache_level level,
 354					  u32 unused)
 355{
 356	struct insert_page arg = { vm, addr, offset, level };
 357
 358	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
 359}
 360
 361struct insert_entries {
 362	struct i915_address_space *vm;
 363	struct i915_vma *vma;
 364	enum i915_cache_level level;
 365	u32 flags;
 366};
 367
 368static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
 369{
 370	struct insert_entries *arg = _arg;
 371
 372	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
 373	bxt_vtd_ggtt_wa(arg->vm);
 374
 375	return 0;
 376}
 377
 378static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
 379					     struct i915_vma *vma,
 380					     enum i915_cache_level level,
 381					     u32 flags)
 382{
 383	struct insert_entries arg = { vm, vma, level, flags };
 384
 385	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
 386}
 387
 388static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 389				  u64 start, u64 length)
 390{
 391	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 392	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
 393	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
 394	gen6_pte_t scratch_pte, __iomem *gtt_base =
 395		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
 396	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
 397	int i;
 398
 399	if (WARN(num_entries > max_entries,
 400		 "First entry = %d; Num entries = %d (max=%d)\n",
 401		 first_entry, num_entries, max_entries))
 402		num_entries = max_entries;
 403
 404	scratch_pte = vm->scratch[0].encode;
 405	for (i = 0; i < num_entries; i++)
 406		iowrite32(scratch_pte, &gtt_base[i]);
 407}
 408
 409static void i915_ggtt_insert_page(struct i915_address_space *vm,
 410				  dma_addr_t addr,
 411				  u64 offset,
 412				  enum i915_cache_level cache_level,
 413				  u32 unused)
 414{
 415	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
 416		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
 417
 418	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
 419}
 420
 421static void i915_ggtt_insert_entries(struct i915_address_space *vm,
 422				     struct i915_vma *vma,
 423				     enum i915_cache_level cache_level,
 424				     u32 unused)
 425{
 426	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
 427		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
 428
 429	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
 430				    flags);
 431}
 432
 433static void i915_ggtt_clear_range(struct i915_address_space *vm,
 434				  u64 start, u64 length)
 435{
 436	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
 437}
 438
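     /*
      * Write GGTT PTEs for the vma unless it is already bound with the
      * requested flags; read-only mappings are honoured only on
      * platforms that support them.
      */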
 439static int ggtt_bind_vma(struct i915_address_space *vm,
 440			 struct i915_vma *vma,
 441			 enum i915_cache_level cache_level,
 442			 u32 flags)
 443{
 444	struct drm_i915_gem_object *obj = vma->obj;
 445	u32 pte_flags;
 446
 447	if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK))
 448		return 0;
 449
 450	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
 451	pte_flags = 0;
 452	if (i915_gem_object_is_readonly(obj))
 453		pte_flags |= PTE_READ_ONLY;
 454
 455	vm->insert_entries(vm, vma, cache_level, pte_flags);
 456	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 457
 458	return 0;
 459}
 460
 461static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
 462{
 463	vm->clear_range(vm, vma->node.start, vma->size);
 464}
 465
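     /*
      * GuC cannot access GGTT offsets above GUC_GGTT_TOP, so reserve
      * that tail of the address space to keep allocations that GuC may
      * need to see out of it.
      */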
 466static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
 467{
 468	u64 size;
 469	int ret;
 470
 471	if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
 472		return 0;
 473
 474	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
 475	size = ggtt->vm.total - GUC_GGTT_TOP;
 476
 477	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
 478				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
 479				   PIN_NOEVICT);
 480	if (ret)
 481		drm_dbg(&ggtt->vm.i915->drm,
 482			"Failed to reserve top of GGTT for GuC\n");
 483
 484	return ret;
 485}
 486
 487static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
 488{
 489	if (drm_mm_node_allocated(&ggtt->uc_fw))
 490		drm_mm_remove_node(&ggtt->uc_fw);
 491}
 492
 493static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
 494{
 495	ggtt_release_guc_top(ggtt);
 496	if (drm_mm_node_allocated(&ggtt->error_capture))
 497		drm_mm_remove_node(&ggtt->error_capture);
 498	mutex_destroy(&ggtt->error_mutex);
 499}
 500
 501static int init_ggtt(struct i915_ggtt *ggtt)
 502{
 503	/*
 504	 * Let GEM Manage all of the aperture.
 505	 *
 506	 * However, leave one page at the end still bound to the scratch page.
 507	 * There are a number of places where the hardware apparently prefetches
 508	 * past the end of the object, and we've seen multiple hangs with the
 509	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
 510	 * aperture.  One page should be enough to keep any prefetching inside
 511	 * of the aperture.
 512	 */
 513	unsigned long hole_start, hole_end;
 514	struct drm_mm_node *entry;
 515	int ret;
 516
 517	/*
 518	 * GuC requires all resources that we're sharing with it to be placed in
 519	 * non-WOPCM memory. If GuC is not present or not in use we still need a
 520	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
 521	 * why.
 522	 */
 523	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
 524			       intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
 525
 526	ret = intel_vgt_balloon(ggtt);
 527	if (ret)
 528		return ret;
 529
 530	mutex_init(&ggtt->error_mutex);
 531	if (ggtt->mappable_end) {
 532		/* Reserve a mappable slot for our lockless error capture */
 533		ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
 534						  &ggtt->error_capture,
 535						  PAGE_SIZE, 0,
 536						  I915_COLOR_UNEVICTABLE,
 537						  0, ggtt->mappable_end,
 538						  DRM_MM_INSERT_LOW);
 539		if (ret)
 540			return ret;
 541	}
 542
 543	/*
 544	 * The upper portion of the GuC address space has a sizeable hole
 545	 * (several MB) that is inaccessible by GuC. Reserve this range within
 546	 * GGTT as it can comfortably hold GuC/HuC firmware images.
 547	 */
 548	ret = ggtt_reserve_guc_top(ggtt);
 549	if (ret)
 550		goto err;
 551
 552	/* Clear any non-preallocated blocks */
 553	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
 554		drm_dbg_kms(&ggtt->vm.i915->drm,
 555			    "clearing unused GTT space: [%lx, %lx]\n",
 556			    hole_start, hole_end);
 557		ggtt->vm.clear_range(&ggtt->vm, hole_start,
 558				     hole_end - hole_start);
 559	}
 560
 561	/* And finally clear the reserved guard page */
 562	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
 563
 564	return 0;
 565
 566err:
 567	cleanup_init_ggtt(ggtt);
 568	return ret;
 569}
 570
 571static int aliasing_gtt_bind_vma(struct i915_address_space *vm,
 572				 struct i915_vma *vma,
 573				 enum i915_cache_level cache_level,
 574				 u32 flags)
 575{
 576	u32 pte_flags;
 577	int ret;
 578
 579	/* Currently applicable only to VLV */
 580	pte_flags = 0;
 581	if (i915_gem_object_is_readonly(vma->obj))
 582		pte_flags |= PTE_READ_ONLY;
 583
 584	if (flags & I915_VMA_LOCAL_BIND) {
 585		struct i915_ppgtt *alias = i915_vm_to_ggtt(vm)->alias;
 586
 587		ret = ppgtt_bind_vma(&alias->vm, vma, cache_level, flags);
 588		if (ret)
 589			return ret;
 590	}
 591
 592	if (flags & I915_VMA_GLOBAL_BIND)
 593		vm->insert_entries(vm, vma, cache_level, pte_flags);
 594
 595	return 0;
 596}
 597
 598static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
 599				    struct i915_vma *vma)
 600{
 601	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
 602		vm->clear_range(vm, vma->node.start, vma->size);
 603
 604	if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
 605		ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
 606}
 607
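     /*
      * For INTEL_PPGTT_ALIASING platforms, create a single ppgtt that
      * aliases the entire GGTT and redirect the GGTT vma ops to the
      * aliasing bind/unbind helpers.
      */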
 608static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
 609{
 610	struct i915_ppgtt *ppgtt;
 611	int err;
 612
 613	ppgtt = i915_ppgtt_create(ggtt->vm.gt);
 614	if (IS_ERR(ppgtt))
 615		return PTR_ERR(ppgtt);
 616
 617	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
 618		err = -ENODEV;
 619		goto err_ppgtt;
 620	}
 621
 622	/*
 623	 * Note we only pre-allocate as far as the end of the global
 624	 * GTT. On 48b / 4-level page-tables, the difference is very,
 625	 * very significant! We have to preallocate as GVT/vgpu does
 626	 * not like the page directory disappearing.
 627	 */
 628	err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
 629	if (err)
 630		goto err_ppgtt;
 631
 632	ggtt->alias = ppgtt;
 633	ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
 634
 635	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
 636	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
 637
 638	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
 639	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
 640
 641	return 0;
 642
 643err_ppgtt:
 644	i915_vm_put(&ppgtt->vm);
 645	return err;
 646}
 647
 648static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
 649{
 650	struct i915_ppgtt *ppgtt;
 651
 652	ppgtt = fetch_and_zero(&ggtt->alias);
 653	if (!ppgtt)
 654		return;
 655
 656	i915_vm_put(&ppgtt->vm);
 657
 658	ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
 659	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
 660}
 661
 662int i915_init_ggtt(struct drm_i915_private *i915)
 663{
 664	int ret;
 665
 666	ret = init_ggtt(&i915->ggtt);
 667	if (ret)
 668		return ret;
 669
 670	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
 671		ret = init_aliasing_ppgtt(&i915->ggtt);
 672		if (ret)
 673			cleanup_init_ggtt(&i915->ggtt);
 674	}
 675
 676	return 0;
 677}
 678
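     /*
      * Tear the GGTT down: unbind any remaining vmas, drop the
      * error-capture and GuC reservations, deflate the vGPU balloon and
      * finally unmap the GSM and aperture.
      */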
 679static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
 680{
 681	struct i915_vma *vma, *vn;
 682
 683	atomic_set(&ggtt->vm.open, 0);
 684
  685	rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
 686	flush_workqueue(ggtt->vm.i915->wq);
 687
 688	mutex_lock(&ggtt->vm.mutex);
 689
 690	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
 691		WARN_ON(__i915_vma_unbind(vma));
 692
 693	if (drm_mm_node_allocated(&ggtt->error_capture))
 694		drm_mm_remove_node(&ggtt->error_capture);
 695	mutex_destroy(&ggtt->error_mutex);
 696
 697	ggtt_release_guc_top(ggtt);
 698	intel_vgt_deballoon(ggtt);
 699
 700	ggtt->vm.cleanup(&ggtt->vm);
 701
 702	mutex_unlock(&ggtt->vm.mutex);
 703	i915_address_space_fini(&ggtt->vm);
 704
 705	arch_phys_wc_del(ggtt->mtrr);
 706
 707	if (ggtt->iomap.size)
 708		io_mapping_fini(&ggtt->iomap);
 709}
 710
 711/**
 712 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
 713 * @i915: i915 device
 714 */
 715void i915_ggtt_driver_release(struct drm_i915_private *i915)
 716{
 717	struct i915_ggtt *ggtt = &i915->ggtt;
 718	struct pagevec *pvec;
 719
 720	fini_aliasing_ppgtt(ggtt);
 721
 722	intel_ggtt_fini_fences(ggtt);
 723	ggtt_cleanup_hw(ggtt);
 724
 725	pvec = &i915->mm.wc_stash.pvec;
 726	if (pvec->nr) {
 727		set_pages_array_wb(pvec->pages, pvec->nr);
 728		__pagevec_release(pvec);
 729	}
 730}
 731
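     /* Decode the GGTT size in bytes from the SNB GMCH control word. */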
 732static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
 733{
 734	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
 735	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
 736	return snb_gmch_ctl << 20;
 737}
 738
 739static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
 740{
 741	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
 742	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
 743	if (bdw_gmch_ctl)
 744		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
 745
 746#ifdef CONFIG_X86_32
 747	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
 748	if (bdw_gmch_ctl > 4)
 749		bdw_gmch_ctl = 4;
 750#endif
 751
 752	return bdw_gmch_ctl << 20;
 753}
 754
 755static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
 756{
 757	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
 758	gmch_ctrl &= SNB_GMCH_GGMS_MASK;
 759
 760	if (gmch_ctrl)
 761		return 1 << (20 + gmch_ctrl);
 762
 763	return 0;
 764}
 765
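     /*
      * Common gen6+ probe tail: map the GGTT page table (the "GSM" in
      * the upper half of BAR 0) and allocate the scratch page used for
      * unused PTEs.
      */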
 766static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 767{
 768	struct drm_i915_private *i915 = ggtt->vm.i915;
 769	struct pci_dev *pdev = i915->drm.pdev;
 770	phys_addr_t phys_addr;
 771	int ret;
 772
 773	/* For Modern GENs the PTEs and register space are split in the BAR */
 774	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
 775
 776	/*
 777	 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
 778	 * will be dropped. For WC mappings in general we have 64 byte burst
 779	 * writes when the WC buffer is flushed, so we can't use it, but have to
 780	 * resort to an uncached mapping. The WC issue is easily caught by the
 781	 * readback check when writing GTT PTE entries.
 782	 */
 783	if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
 784		ggtt->gsm = ioremap(phys_addr, size);
 785	else
 786		ggtt->gsm = ioremap_wc(phys_addr, size);
 787	if (!ggtt->gsm) {
 788		drm_err(&i915->drm, "Failed to map the ggtt page table\n");
 789		return -ENOMEM;
 790	}
 791
 792	ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
 793	if (ret) {
 794		drm_err(&i915->drm, "Scratch setup failed\n");
 795		/* iounmap will also get called at remove, but meh */
 796		iounmap(ggtt->gsm);
 797		return ret;
 798	}
 799
 800	ggtt->vm.scratch[0].encode =
 801		ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
 802				    I915_CACHE_NONE, 0);
 803
 804	return 0;
 805}
 806
 807int ggtt_set_pages(struct i915_vma *vma)
 808{
 809	int ret;
 810
 811	GEM_BUG_ON(vma->pages);
 812
 813	ret = i915_get_ggtt_vma_pages(vma);
 814	if (ret)
 815		return ret;
 816
 817	vma->page_sizes = vma->obj->mm.page_sizes;
 818
 819	return 0;
 820}
 821
 822static void gen6_gmch_remove(struct i915_address_space *vm)
 823{
 824	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 825
 826	iounmap(ggtt->gsm);
 827	cleanup_scratch_page(vm);
 828}
 829
 830static struct resource pci_resource(struct pci_dev *pdev, int bar)
 831{
 832	return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
 833					       pci_resource_len(pdev, bar));
 834}
 835
 836static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 837{
 838	struct drm_i915_private *i915 = ggtt->vm.i915;
 839	struct pci_dev *pdev = i915->drm.pdev;
 840	unsigned int size;
 841	u16 snb_gmch_ctl;
 842
 843	/* TODO: We're not aware of mappable constraints on gen8 yet */
 844	if (!IS_DGFX(i915)) {
 845		ggtt->gmadr = pci_resource(pdev, 2);
 846		ggtt->mappable_end = resource_size(&ggtt->gmadr);
 847	}
 848
 849	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 850	if (IS_CHERRYVIEW(i915))
 851		size = chv_get_total_gtt_size(snb_gmch_ctl);
 852	else
 853		size = gen8_get_total_gtt_size(snb_gmch_ctl);
 854
 855	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
 856	ggtt->vm.cleanup = gen6_gmch_remove;
 857	ggtt->vm.insert_page = gen8_ggtt_insert_page;
 858	ggtt->vm.clear_range = nop_clear_range;
 859	if (intel_scanout_needs_vtd_wa(i915))
 860		ggtt->vm.clear_range = gen8_ggtt_clear_range;
 861
 862	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
 863
 864	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
 865	if (intel_ggtt_update_needs_vtd_wa(i915) ||
 866	    IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
 867		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
 868		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
 869		ggtt->vm.bind_async_flags =
 870			I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
 871	}
 872
 873	ggtt->invalidate = gen8_ggtt_invalidate;
 874
 875	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
 876	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
 877	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
 878	ggtt->vm.vma_ops.clear_pages = clear_pages;
 879
 880	ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
 881
 882	setup_private_pat(ggtt->vm.gt->uncore);
 883
 884	return ggtt_probe_common(ggtt, size);
 885}
 886
 887static u64 snb_pte_encode(dma_addr_t addr,
 888			  enum i915_cache_level level,
 889			  u32 flags)
 890{
 891	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
 892
 893	switch (level) {
 894	case I915_CACHE_L3_LLC:
 895	case I915_CACHE_LLC:
 896		pte |= GEN6_PTE_CACHE_LLC;
 897		break;
 898	case I915_CACHE_NONE:
 899		pte |= GEN6_PTE_UNCACHED;
 900		break;
 901	default:
 902		MISSING_CASE(level);
 903	}
 904
 905	return pte;
 906}
 907
 908static u64 ivb_pte_encode(dma_addr_t addr,
 909			  enum i915_cache_level level,
 910			  u32 flags)
 911{
 912	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
 913
 914	switch (level) {
 915	case I915_CACHE_L3_LLC:
 916		pte |= GEN7_PTE_CACHE_L3_LLC;
 917		break;
 918	case I915_CACHE_LLC:
 919		pte |= GEN6_PTE_CACHE_LLC;
 920		break;
 921	case I915_CACHE_NONE:
 922		pte |= GEN6_PTE_UNCACHED;
 923		break;
 924	default:
 925		MISSING_CASE(level);
 926	}
 927
 928	return pte;
 929}
 930
 931static u64 byt_pte_encode(dma_addr_t addr,
 932			  enum i915_cache_level level,
 933			  u32 flags)
 934{
 935	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
 936
 937	if (!(flags & PTE_READ_ONLY))
 938		pte |= BYT_PTE_WRITEABLE;
 939
 940	if (level != I915_CACHE_NONE)
 941		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
 942
 943	return pte;
 944}
 945
 946static u64 hsw_pte_encode(dma_addr_t addr,
 947			  enum i915_cache_level level,
 948			  u32 flags)
 949{
 950	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
 951
 952	if (level != I915_CACHE_NONE)
 953		pte |= HSW_WB_LLC_AGE3;
 954
 955	return pte;
 956}
 957
 958static u64 iris_pte_encode(dma_addr_t addr,
 959			   enum i915_cache_level level,
 960			   u32 flags)
 961{
 962	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
 963
 964	switch (level) {
 965	case I915_CACHE_NONE:
 966		break;
 967	case I915_CACHE_WT:
 968		pte |= HSW_WT_ELLC_LLC_AGE3;
 969		break;
 970	default:
 971		pte |= HSW_WB_ELLC_LLC_AGE3;
 972		break;
 973	}
 974
 975	return pte;
 976}
 977
 978static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 979{
 980	struct drm_i915_private *i915 = ggtt->vm.i915;
 981	struct pci_dev *pdev = i915->drm.pdev;
 982	unsigned int size;
 983	u16 snb_gmch_ctl;
 984
 985	ggtt->gmadr = pci_resource(pdev, 2);
 986	ggtt->mappable_end = resource_size(&ggtt->gmadr);
 987
 988	/*
 989	 * 64/512MB is the current min/max we actually know of, but this is
 990	 * just a coarse sanity check.
 991	 */
 992	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
 993		drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
 994			&ggtt->mappable_end);
 995		return -ENXIO;
 996	}
 997
 998	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 999
1000	size = gen6_get_total_gtt_size(snb_gmch_ctl);
1001	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
1002
1003	ggtt->vm.clear_range = nop_clear_range;
1004	if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
1005		ggtt->vm.clear_range = gen6_ggtt_clear_range;
1006	ggtt->vm.insert_page = gen6_ggtt_insert_page;
1007	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
1008	ggtt->vm.cleanup = gen6_gmch_remove;
1009
1010	ggtt->invalidate = gen6_ggtt_invalidate;
1011
1012	if (HAS_EDRAM(i915))
1013		ggtt->vm.pte_encode = iris_pte_encode;
1014	else if (IS_HASWELL(i915))
1015		ggtt->vm.pte_encode = hsw_pte_encode;
1016	else if (IS_VALLEYVIEW(i915))
1017		ggtt->vm.pte_encode = byt_pte_encode;
1018	else if (INTEL_GEN(i915) >= 7)
1019		ggtt->vm.pte_encode = ivb_pte_encode;
1020	else
1021		ggtt->vm.pte_encode = snb_pte_encode;
1022
1023	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
1024	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
1025	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
1026	ggtt->vm.vma_ops.clear_pages = clear_pages;
1027
1028	return ggtt_probe_common(ggtt, size);
1029}
1030
1031static void i915_gmch_remove(struct i915_address_space *vm)
1032{
1033	intel_gmch_remove();
1034}
1035
1036static int i915_gmch_probe(struct i915_ggtt *ggtt)
1037{
1038	struct drm_i915_private *i915 = ggtt->vm.i915;
1039	phys_addr_t gmadr_base;
1040	int ret;
1041
1042	ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
1043	if (!ret) {
1044		drm_err(&i915->drm, "failed to set up gmch\n");
1045		return -EIO;
1046	}
1047
1048	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
1049
1050	ggtt->gmadr =
1051		(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
1052
1053	ggtt->do_idle_maps = needs_idle_maps(i915);
1054	ggtt->vm.insert_page = i915_ggtt_insert_page;
1055	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
1056	ggtt->vm.clear_range = i915_ggtt_clear_range;
1057	ggtt->vm.cleanup = i915_gmch_remove;
1058
1059	ggtt->invalidate = gmch_ggtt_invalidate;
1060
1061	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
1062	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
1063	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
1064	ggtt->vm.vma_ops.clear_pages = clear_pages;
1065
1066	if (unlikely(ggtt->do_idle_maps))
1067		drm_notice(&i915->drm,
1068			   "Applying Ironlake quirks for intel_iommu\n");
1069
1070	return 0;
1071}
1072
1073static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
1074{
1075	struct drm_i915_private *i915 = gt->i915;
1076	int ret;
1077
1078	ggtt->vm.gt = gt;
1079	ggtt->vm.i915 = i915;
1080	ggtt->vm.dma = &i915->drm.pdev->dev;
1081
1082	if (INTEL_GEN(i915) <= 5)
1083		ret = i915_gmch_probe(ggtt);
1084	else if (INTEL_GEN(i915) < 8)
1085		ret = gen6_gmch_probe(ggtt);
1086	else
1087		ret = gen8_gmch_probe(ggtt);
1088	if (ret)
1089		return ret;
1090
1091	if ((ggtt->vm.total - 1) >> 32) {
1092		drm_err(&i915->drm,
1093			"We never expected a Global GTT with more than 32bits"
1094			" of address space! Found %lldM!\n",
1095			ggtt->vm.total >> 20);
1096		ggtt->vm.total = 1ULL << 32;
1097		ggtt->mappable_end =
1098			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
1099	}
1100
1101	if (ggtt->mappable_end > ggtt->vm.total) {
1102		drm_err(&i915->drm,
1103			"mappable aperture extends past end of GGTT,"
1104			" aperture=%pa, total=%llx\n",
1105			&ggtt->mappable_end, ggtt->vm.total);
1106		ggtt->mappable_end = ggtt->vm.total;
1107	}
1108
1109	/* GMADR is the PCI mmio aperture into the global GTT. */
1110	drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
1111	drm_dbg(&i915->drm, "GMADR size = %lluM\n",
1112		(u64)ggtt->mappable_end >> 20);
1113	drm_dbg(&i915->drm, "DSM size = %lluM\n",
1114		(u64)resource_size(&intel_graphics_stolen_res) >> 20);
1115
1116	return 0;
1117}
1118
1119/**
1120 * i915_ggtt_probe_hw - Probe GGTT hardware location
1121 * @i915: i915 device
1122 */
1123int i915_ggtt_probe_hw(struct drm_i915_private *i915)
1124{
1125	int ret;
1126
1127	ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
1128	if (ret)
1129		return ret;
1130
1131	if (intel_vtd_active())
1132		drm_info(&i915->drm, "VT-d active for gfx access\n");
1133
1134	return 0;
1135}
1136
1137int i915_ggtt_enable_hw(struct drm_i915_private *i915)
1138{
1139	if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
1140		return -EIO;
1141
1142	return 0;
1143}
1144
1145void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
1146{
1147	GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);
1148
1149	ggtt->invalidate = guc_ggtt_invalidate;
1150
1151	ggtt->invalidate(ggtt);
1152}
1153
1154void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
1155{
1156	/* XXX Temporary pardon for error unload */
1157	if (ggtt->invalidate == gen8_ggtt_invalidate)
1158		return;
1159
1160	/* We should only be called after i915_ggtt_enable_guc() */
1161	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
1162
1163	ggtt->invalidate = gen8_ggtt_invalidate;
1164
1165	ggtt->invalidate(ggtt);
1166}
1167
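     /* Clear the vma's bind flags, returning the old mask so resume can rebind with it. */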
1168static unsigned int clear_bind(struct i915_vma *vma)
1169{
1170	return atomic_fetch_and(~I915_VMA_BIND_MASK, &vma->flags);
1171}
1172
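     /*
      * Restore the GGTT after resume: clear any stale faults, scrub the
      * PTEs, rebind every vma still on the bound list with its previous
      * bind flags, then restore the PAT (gen8+) and fence registers.
      */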
1173void i915_ggtt_resume(struct i915_ggtt *ggtt)
1174{
1175	struct i915_vma *vma;
1176	bool flush = false;
1177	int open;
1178
1179	intel_gt_check_and_clear_faults(ggtt->vm.gt);
1180
1181	/* First fill our portion of the GTT with scratch pages */
1182	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
1183
1184	/* Skip rewriting PTE on VMA unbind. */
1185	open = atomic_xchg(&ggtt->vm.open, 0);
1186
1187	/* clflush objects bound into the GGTT and rebind them. */
1188	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
1189		struct drm_i915_gem_object *obj = vma->obj;
1190		unsigned int was_bound = clear_bind(vma);
1191
1192		WARN_ON(i915_vma_bind(vma,
1193				      obj ? obj->cache_level : 0,
1194				      was_bound, NULL));
1195		if (obj) { /* only used during resume => exclusive access */
1196			flush |= fetch_and_zero(&obj->write_domain);
1197			obj->read_domains |= I915_GEM_DOMAIN_GTT;
1198		}
1199	}
1200
1201	atomic_set(&ggtt->vm.open, open);
1202	ggtt->invalidate(ggtt);
1203
1204	if (flush)
1205		wbinvd_on_all_cpus();
1206
1207	if (INTEL_GEN(ggtt->vm.i915) >= 8)
1208		setup_private_pat(ggtt->vm.gt->uncore);
1209
1210	intel_ggtt_restore_fences(ggtt);
1211}
1212
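     /*
      * Lay out one plane of a rotated (90 degree) view: walk the source
      * pages column by column, starting from the bottom row, emitting
      * one single-page sg entry (DMA address only) per GTT page.
      */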
1213static struct scatterlist *
1214rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
1215	     unsigned int width, unsigned int height,
1216	     unsigned int stride,
1217	     struct sg_table *st, struct scatterlist *sg)
1218{
1219	unsigned int column, row;
1220	unsigned int src_idx;
1221
1222	for (column = 0; column < width; column++) {
1223		src_idx = stride * (height - 1) + column + offset;
1224		for (row = 0; row < height; row++) {
1225			st->nents++;
1226			/*
1227			 * We don't need the pages, but need to initialize
1228			 * the entries so the sg list can be happily traversed.
1229			 * The only thing we need are DMA addresses.
1230			 */
1231			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
1232			sg_dma_address(sg) =
1233				i915_gem_object_get_dma_address(obj, src_idx);
1234			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
1235			sg = sg_next(sg);
1236			src_idx -= stride;
1237		}
1238	}
1239
1240	return sg;
1241}
1242
1243static noinline struct sg_table *
1244intel_rotate_pages(struct intel_rotation_info *rot_info,
1245		   struct drm_i915_gem_object *obj)
1246{
1247	unsigned int size = intel_rotation_info_size(rot_info);
1248	struct drm_i915_private *i915 = to_i915(obj->base.dev);
1249	struct sg_table *st;
1250	struct scatterlist *sg;
1251	int ret = -ENOMEM;
1252	int i;
1253
1254	/* Allocate target SG list. */
1255	st = kmalloc(sizeof(*st), GFP_KERNEL);
1256	if (!st)
1257		goto err_st_alloc;
1258
1259	ret = sg_alloc_table(st, size, GFP_KERNEL);
1260	if (ret)
1261		goto err_sg_alloc;
1262
1263	st->nents = 0;
1264	sg = st->sgl;
1265
1266	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
1267		sg = rotate_pages(obj, rot_info->plane[i].offset,
1268				  rot_info->plane[i].width, rot_info->plane[i].height,
1269				  rot_info->plane[i].stride, st, sg);
1270	}
1271
1272	return st;
1273
1274err_sg_alloc:
1275	kfree(st);
1276err_st_alloc:
1277
1278	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
1279		obj->base.size, rot_info->plane[0].width,
1280		rot_info->plane[0].height, size);
1281
1282	return ERR_PTR(ret);
1283}
1284
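     /*
      * Lay out one plane of a remapped view: copy width pages per row
      * from the (possibly larger-strided) source object, coalescing
      * physically contiguous runs into single sg entries.
      */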
1285static struct scatterlist *
1286remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
1287	    unsigned int width, unsigned int height,
1288	    unsigned int stride,
1289	    struct sg_table *st, struct scatterlist *sg)
1290{
1291	unsigned int row;
1292
1293	for (row = 0; row < height; row++) {
1294		unsigned int left = width * I915_GTT_PAGE_SIZE;
1295
1296		while (left) {
1297			dma_addr_t addr;
1298			unsigned int length;
1299
1300			/*
1301			 * We don't need the pages, but need to initialize
1302			 * the entries so the sg list can be happily traversed.
1303			 * The only thing we need are DMA addresses.
1304			 */
1305
1306			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
1307
1308			length = min(left, length);
1309
1310			st->nents++;
1311
1312			sg_set_page(sg, NULL, length, 0);
1313			sg_dma_address(sg) = addr;
1314			sg_dma_len(sg) = length;
1315			sg = sg_next(sg);
1316
1317			offset += length / I915_GTT_PAGE_SIZE;
1318			left -= length;
1319		}
1320
1321		offset += stride - width;
1322	}
1323
1324	return sg;
1325}
1326
1327static noinline struct sg_table *
1328intel_remap_pages(struct intel_remapped_info *rem_info,
1329		  struct drm_i915_gem_object *obj)
1330{
1331	unsigned int size = intel_remapped_info_size(rem_info);
1332	struct drm_i915_private *i915 = to_i915(obj->base.dev);
1333	struct sg_table *st;
1334	struct scatterlist *sg;
1335	int ret = -ENOMEM;
1336	int i;
1337
1338	/* Allocate target SG list. */
1339	st = kmalloc(sizeof(*st), GFP_KERNEL);
1340	if (!st)
1341		goto err_st_alloc;
1342
1343	ret = sg_alloc_table(st, size, GFP_KERNEL);
1344	if (ret)
1345		goto err_sg_alloc;
1346
1347	st->nents = 0;
1348	sg = st->sgl;
1349
1350	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
1351		sg = remap_pages(obj, rem_info->plane[i].offset,
1352				 rem_info->plane[i].width, rem_info->plane[i].height,
1353				 rem_info->plane[i].stride, st, sg);
1354	}
1355
1356	i915_sg_trim(st);
1357
1358	return st;
1359
1360err_sg_alloc:
1361	kfree(st);
1362err_st_alloc:
1363
1364	drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
1365		obj->base.size, rem_info->plane[0].width,
1366		rem_info->plane[0].height, size);
1367
1368	return ERR_PTR(ret);
1369}
1370
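     /*
      * Build an sg table covering only the requested page range of the
      * object for a partial view, trimming any unused tail entries.
      */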
1371static noinline struct sg_table *
1372intel_partial_pages(const struct i915_ggtt_view *view,
1373		    struct drm_i915_gem_object *obj)
1374{
1375	struct sg_table *st;
1376	struct scatterlist *sg, *iter;
1377	unsigned int count = view->partial.size;
1378	unsigned int offset;
1379	int ret = -ENOMEM;
1380
1381	st = kmalloc(sizeof(*st), GFP_KERNEL);
1382	if (!st)
1383		goto err_st_alloc;
1384
1385	ret = sg_alloc_table(st, count, GFP_KERNEL);
1386	if (ret)
1387		goto err_sg_alloc;
1388
1389	iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
1390	GEM_BUG_ON(!iter);
1391
1392	sg = st->sgl;
1393	st->nents = 0;
1394	do {
1395		unsigned int len;
1396
1397		len = min(iter->length - (offset << PAGE_SHIFT),
1398			  count << PAGE_SHIFT);
1399		sg_set_page(sg, NULL, len, 0);
1400		sg_dma_address(sg) =
1401			sg_dma_address(iter) + (offset << PAGE_SHIFT);
1402		sg_dma_len(sg) = len;
1403
1404		st->nents++;
1405		count -= len >> PAGE_SHIFT;
1406		if (count == 0) {
1407			sg_mark_end(sg);
1408			i915_sg_trim(st); /* Drop any unused tail entries. */
1409
1410			return st;
1411		}
1412
1413		sg = __sg_next(sg);
1414		iter = __sg_next(iter);
1415		offset = 0;
1416	} while (1);
1417
1418err_sg_alloc:
1419	kfree(st);
1420err_st_alloc:
1421	return ERR_PTR(ret);
1422}
1423
1424static int
1425i915_get_ggtt_vma_pages(struct i915_vma *vma)
1426{
1427	int ret;
1428
1429	/*
1430	 * The vma->pages are only valid within the lifespan of the borrowed
1431	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
1432	 * must be the vma->pages. A simple rule is that vma->pages must only
1433	 * be accessed when the obj->mm.pages are pinned.
1434	 */
1435	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
1436
1437	switch (vma->ggtt_view.type) {
1438	default:
1439		GEM_BUG_ON(vma->ggtt_view.type);
1440		fallthrough;
1441	case I915_GGTT_VIEW_NORMAL:
1442		vma->pages = vma->obj->mm.pages;
1443		return 0;
1444
1445	case I915_GGTT_VIEW_ROTATED:
1446		vma->pages =
1447			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
1448		break;
1449
1450	case I915_GGTT_VIEW_REMAPPED:
1451		vma->pages =
1452			intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
1453		break;
1454
1455	case I915_GGTT_VIEW_PARTIAL:
1456		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
1457		break;
1458	}
1459
1460	ret = 0;
1461	if (IS_ERR(vma->pages)) {
1462		ret = PTR_ERR(vma->pages);
1463		vma->pages = NULL;
1464		drm_err(&vma->vm->i915->drm,
1465			"Failed to get pages for VMA view type %u (%d)!\n",
1466			vma->ggtt_view.type, ret);
1467	}
1468	return ret;
1469}