   1/*
   2 * Copyright © 2010 Daniel Vetter
   3 * Copyright © 2011-2014 Intel Corporation
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice (including the next
  13 * paragraph) shall be included in all copies or substantial portions of the
  14 * Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  22 * IN THE SOFTWARE.
  23 *
  24 */
  25
  26#include <linux/slab.h> /* fault-inject.h is not standalone! */
  27
  28#include <linux/fault-inject.h>
  29#include <linux/log2.h>
  30#include <linux/random.h>
  31#include <linux/seq_file.h>
  32#include <linux/stop_machine.h>
  33
  34#include <asm/set_memory.h>
  35#include <asm/smp.h>
  36
  37#include <drm/i915_drm.h>
  38
  39#include "display/intel_frontbuffer.h"
  40#include "gt/intel_gt.h"
  41
  42#include "i915_drv.h"
  43#include "i915_scatterlist.h"
  44#include "i915_trace.h"
  45#include "i915_vgpu.h"
  46
  47#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
  48
  49#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
  50#define DBG(...) trace_printk(__VA_ARGS__)
  51#else
  52#define DBG(...)
  53#endif
  54
  55/**
  56 * DOC: Global GTT views
  57 *
  58 * Background and previous state
  59 *
  60 * Historically objects could exist (be bound) in global GTT space only as
  61 * singular instances with a view representing all of the object's backing pages
  62 * in a linear fashion. This view will be called a normal view.
  63 *
  64 * To support multiple views of the same object, where the number of mapped
  65 * pages is not equal to the backing store, or where the layout of the pages
  66 * is not linear, the concept of a GGTT view was added.
  67 *
  68 * One example of an alternative view is a stereo display driven by a single
  69 * image. In this case we would have a framebuffer looking like this
  70 * (2x2 pages):
  71 *
  72 *    12
  73 *    34
  74 *
  75 * The above would represent a normal GGTT view as normally mapped for GPU or
  76 * CPU rendering. In contrast, the display engine would be fed an alternative
  77 * view which could look something like this:
  78 *
  79 *   1212
  80 *   3434
  81 *
  82 * In this example both the size and layout of pages in the alternative view are
  83 * different from the normal view.
  84 *
  85 * Implementation and usage
  86 *
  87 * GGTT views are implemented using VMAs and are distinguished via enum
  88 * i915_ggtt_view_type and struct i915_ggtt_view.
  89 *
  90 * A new flavour of core GEM functions which work with GGTT bound objects was
  91 * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
  92 * renaming in large amounts of code. They take the struct i915_ggtt_view
  93 * parameter encapsulating all metadata required to implement a view.
  94 *
  95 * As a helper for callers which are only interested in the normal view,
  96 * a globally const i915_ggtt_view_normal singleton instance exists. All old core
  97 * GEM API functions, the ones not taking the view parameter, operate on, or
  98 * with, the normal GGTT view.
  99 *
 100 * Code wanting to add or use a new GGTT view needs to:
 101 *
 102 * 1. Add a new enum with a suitable name.
 103 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 104 * 3. Add support to i915_get_vma_pages().
 105 *
 106 * New views are required to build a scatter-gather table from within the
 107 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 108 * exists for the lifetime of a VMA.
 109 *
 110 * The core API is designed to have copy semantics, which means that the passed-in
 111 * struct i915_ggtt_view does not need to be persistent (left around after
 112 * calling the core API functions); see the illustrative sketch below.
 113 *
 114 */
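/*
 * Illustrative sketch only (not part of this file): binding an object
 * through a non-normal GGTT view. It assumes the i915_gem_object_ggtt_pin()
 * helper and the partial view type declared elsewhere in i915; treat the
 * exact field names as an approximation rather than a reference.
 *
 *	struct i915_ggtt_view view = {
 *		.type = I915_GGTT_VIEW_PARTIAL,
 *		.partial = { .offset = 0, .size = 1 },	// map only the first page
 *	};
 *	struct i915_vma *vma;
 *
 *	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * Because of the copy semantics noted above, @view may live on the caller's
 * stack; the core functions take their own copy of the metadata.
 */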
 115
 116#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
 117
 118static int
 119i915_get_ggtt_vma_pages(struct i915_vma *vma);
 120
 121static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
 122{
 123	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
 124
 125	/*
 126	 * Note that as an uncached mmio write, this will flush the
 127	 * WCB of the writes into the GGTT before it triggers the invalidate.
 128	 */
 129	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 130}
 131
 132static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
 133{
 134	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
 135
 136	gen6_ggtt_invalidate(ggtt);
 137	intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 138}
 139
 140static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
 141{
 142	intel_gtt_chipset_flush();
 143}
 144
 145static int ppgtt_bind_vma(struct i915_vma *vma,
 146			  enum i915_cache_level cache_level,
 147			  u32 unused)
 148{
 149	u32 pte_flags;
 150	int err;
 151
 152	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
 153		err = vma->vm->allocate_va_range(vma->vm,
 154						 vma->node.start, vma->size);
 155		if (err)
 156			return err;
 157	}
 158
 159	/* Applicable to VLV, and gen8+ */
 160	pte_flags = 0;
 161	if (i915_gem_object_is_readonly(vma->obj))
 162		pte_flags |= PTE_READ_ONLY;
 163
 164	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
 165
 166	return 0;
 167}
 168
 169static void ppgtt_unbind_vma(struct i915_vma *vma)
 170{
 171	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
 172}
 173
 174static int ppgtt_set_pages(struct i915_vma *vma)
 175{
 176	GEM_BUG_ON(vma->pages);
 177
 178	vma->pages = vma->obj->mm.pages;
 179
 180	vma->page_sizes = vma->obj->mm.page_sizes;
 181
 182	return 0;
 183}
 184
 185static void clear_pages(struct i915_vma *vma)
 186{
 187	GEM_BUG_ON(!vma->pages);
 188
 189	if (vma->pages != vma->obj->mm.pages) {
 190		sg_free_table(vma->pages);
 191		kfree(vma->pages);
 192	}
 193	vma->pages = NULL;
 194
 195	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
 196}
 197
 198static u64 gen8_pte_encode(dma_addr_t addr,
 199			   enum i915_cache_level level,
 200			   u32 flags)
 201{
 202	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
 203
 204	if (unlikely(flags & PTE_READ_ONLY))
 205		pte &= ~_PAGE_RW;
 206
 207	switch (level) {
 208	case I915_CACHE_NONE:
 209		pte |= PPAT_UNCACHED;
 210		break;
 211	case I915_CACHE_WT:
 212		pte |= PPAT_DISPLAY_ELLC;
 213		break;
 214	default:
 215		pte |= PPAT_CACHED;
 216		break;
 217	}
 218
 219	return pte;
 220}
 221
 222static u64 gen8_pde_encode(const dma_addr_t addr,
 223			   const enum i915_cache_level level)
 224{
 225	u64 pde = _PAGE_PRESENT | _PAGE_RW;
 226	pde |= addr;
 227	if (level != I915_CACHE_NONE)
 228		pde |= PPAT_CACHED_PDE;
 229	else
 230		pde |= PPAT_UNCACHED;
 231	return pde;
 232}
 233
 234static u64 snb_pte_encode(dma_addr_t addr,
 235			  enum i915_cache_level level,
 236			  u32 flags)
 237{
 238	gen6_pte_t pte = GEN6_PTE_VALID;
 239	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 240
 241	switch (level) {
 242	case I915_CACHE_L3_LLC:
 243	case I915_CACHE_LLC:
 244		pte |= GEN6_PTE_CACHE_LLC;
 245		break;
 246	case I915_CACHE_NONE:
 247		pte |= GEN6_PTE_UNCACHED;
 248		break;
 249	default:
 250		MISSING_CASE(level);
 251	}
 252
 253	return pte;
 254}
 255
 256static u64 ivb_pte_encode(dma_addr_t addr,
 257			  enum i915_cache_level level,
 258			  u32 flags)
 259{
 260	gen6_pte_t pte = GEN6_PTE_VALID;
 261	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 262
 263	switch (level) {
 264	case I915_CACHE_L3_LLC:
 265		pte |= GEN7_PTE_CACHE_L3_LLC;
 266		break;
 267	case I915_CACHE_LLC:
 268		pte |= GEN6_PTE_CACHE_LLC;
 269		break;
 270	case I915_CACHE_NONE:
 271		pte |= GEN6_PTE_UNCACHED;
 272		break;
 273	default:
 274		MISSING_CASE(level);
 275	}
 276
 277	return pte;
 278}
 279
 280static u64 byt_pte_encode(dma_addr_t addr,
 281			  enum i915_cache_level level,
 282			  u32 flags)
 283{
 284	gen6_pte_t pte = GEN6_PTE_VALID;
 285	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 286
 287	if (!(flags & PTE_READ_ONLY))
 288		pte |= BYT_PTE_WRITEABLE;
 289
 290	if (level != I915_CACHE_NONE)
 291		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
 292
 293	return pte;
 294}
 295
 296static u64 hsw_pte_encode(dma_addr_t addr,
 297			  enum i915_cache_level level,
 298			  u32 flags)
 299{
 300	gen6_pte_t pte = GEN6_PTE_VALID;
 301	pte |= HSW_PTE_ADDR_ENCODE(addr);
 302
 303	if (level != I915_CACHE_NONE)
 304		pte |= HSW_WB_LLC_AGE3;
 305
 306	return pte;
 307}
 308
 309static u64 iris_pte_encode(dma_addr_t addr,
 310			   enum i915_cache_level level,
 311			   u32 flags)
 312{
 313	gen6_pte_t pte = GEN6_PTE_VALID;
 314	pte |= HSW_PTE_ADDR_ENCODE(addr);
 315
 316	switch (level) {
 317	case I915_CACHE_NONE:
 318		break;
 319	case I915_CACHE_WT:
 320		pte |= HSW_WT_ELLC_LLC_AGE3;
 321		break;
 322	default:
 323		pte |= HSW_WB_ELLC_LLC_AGE3;
 324		break;
 325	}
 326
 327	return pte;
 328}
 329
 330static void stash_init(struct pagestash *stash)
 331{
 332	pagevec_init(&stash->pvec);
 333	spin_lock_init(&stash->lock);
 334}
 335
 336static struct page *stash_pop_page(struct pagestash *stash)
 337{
 338	struct page *page = NULL;
 339
 340	spin_lock(&stash->lock);
 341	if (likely(stash->pvec.nr))
 342		page = stash->pvec.pages[--stash->pvec.nr];
 343	spin_unlock(&stash->lock);
 344
 345	return page;
 346}
 347
 348static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
 349{
 350	unsigned int nr;
 351
 352	spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
 353
 354	nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
 355	memcpy(stash->pvec.pages + stash->pvec.nr,
 356	       pvec->pages + pvec->nr - nr,
 357	       sizeof(pvec->pages[0]) * nr);
 358	stash->pvec.nr += nr;
 359
 360	spin_unlock(&stash->lock);
 361
 362	pvec->nr -= nr;
 363}
 364
 365static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 366{
 367	struct pagevec stack;
 368	struct page *page;
 369
 370	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
 371		i915_gem_shrink_all(vm->i915);
 372
 373	page = stash_pop_page(&vm->free_pages);
 374	if (page)
 375		return page;
 376
 377	if (!vm->pt_kmap_wc)
 378		return alloc_page(gfp);
 379
 380	/* Look in our global stash of WC pages... */
 381	page = stash_pop_page(&vm->i915->mm.wc_stash);
 382	if (page)
 383		return page;
 384
 385	/*
 386	 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
 387	 *
 388	 * We have to be careful as page allocation may trigger the shrinker
 389	 * (via direct reclaim) which will fill up the WC stash underneath us.
 390	 * So we add our WB pages into a temporary pvec on the stack and merge
 391	 * them into the WC stash after all the allocations are complete.
 392	 */
 393	pagevec_init(&stack);
 394	do {
 395		struct page *page;
 396
 397		page = alloc_page(gfp);
 398		if (unlikely(!page))
 399			break;
 400
 401		stack.pages[stack.nr++] = page;
 402	} while (pagevec_space(&stack));
 403
 404	if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
 405		page = stack.pages[--stack.nr];
 406
 407		/* Merge spare WC pages to the global stash */
 408		if (stack.nr)
 409			stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
 410
 411		/* Push any surplus WC pages onto the local VM stash */
 412		if (stack.nr)
 413			stash_push_pagevec(&vm->free_pages, &stack);
 414	}
 415
 416	/* Return unwanted leftovers */
 417	if (unlikely(stack.nr)) {
 418		WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
 419		__pagevec_release(&stack);
 420	}
 421
 422	return page;
 423}
 424
 425static void vm_free_pages_release(struct i915_address_space *vm,
 426				  bool immediate)
 427{
 428	struct pagevec *pvec = &vm->free_pages.pvec;
 429	struct pagevec stack;
 430
 431	lockdep_assert_held(&vm->free_pages.lock);
 432	GEM_BUG_ON(!pagevec_count(pvec));
 433
 434	if (vm->pt_kmap_wc) {
 435		/*
  436		 * When we use WC, first fill up the global stash and then,
  437		 * only if it is full, immediately free the overflow.
 438		 */
 439		stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
 440
 441		/*
 442		 * As we have made some room in the VM's free_pages,
 443		 * we can wait for it to fill again. Unless we are
 444		 * inside i915_address_space_fini() and must
 445		 * immediately release the pages!
 446		 */
 447		if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
 448			return;
 449
 450		/*
 451		 * We have to drop the lock to allow ourselves to sleep,
 452		 * so take a copy of the pvec and clear the stash for
 453		 * others to use it as we sleep.
 454		 */
 455		stack = *pvec;
 456		pagevec_reinit(pvec);
 457		spin_unlock(&vm->free_pages.lock);
 458
 459		pvec = &stack;
 460		set_pages_array_wb(pvec->pages, pvec->nr);
 461
 462		spin_lock(&vm->free_pages.lock);
 463	}
 464
 465	__pagevec_release(pvec);
 466}
 467
 468static void vm_free_page(struct i915_address_space *vm, struct page *page)
 469{
 470	/*
 471	 * On !llc, we need to change the pages back to WB. We only do so
 472	 * in bulk, so we rarely need to change the page attributes here,
 473	 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
 474	 * To make detection of the possible sleep more likely, use an
 475	 * unconditional might_sleep() for everybody.
 476	 */
 477	might_sleep();
 478	spin_lock(&vm->free_pages.lock);
 479	while (!pagevec_space(&vm->free_pages.pvec))
 480		vm_free_pages_release(vm, false);
 481	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
 482	pagevec_add(&vm->free_pages.pvec, page);
 483	spin_unlock(&vm->free_pages.lock);
 484}
 485
 486static void i915_address_space_fini(struct i915_address_space *vm)
 487{
 488	spin_lock(&vm->free_pages.lock);
 489	if (pagevec_count(&vm->free_pages.pvec))
 490		vm_free_pages_release(vm, true);
 491	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
 492	spin_unlock(&vm->free_pages.lock);
 493
 494	drm_mm_takedown(&vm->mm);
 495
 496	mutex_destroy(&vm->mutex);
 497}
 498
 499static void ppgtt_destroy_vma(struct i915_address_space *vm)
 500{
 501	struct list_head *phases[] = {
 502		&vm->bound_list,
 503		&vm->unbound_list,
 504		NULL,
 505	}, **phase;
 506
 507	mutex_lock(&vm->i915->drm.struct_mutex);
 508	for (phase = phases; *phase; phase++) {
 509		struct i915_vma *vma, *vn;
 510
 511		list_for_each_entry_safe(vma, vn, *phase, vm_link)
 512			i915_vma_destroy(vma);
 513	}
 514	mutex_unlock(&vm->i915->drm.struct_mutex);
 515}
 516
 517static void __i915_vm_release(struct work_struct *work)
 518{
 519	struct i915_address_space *vm =
 520		container_of(work, struct i915_address_space, rcu.work);
 521
 522	ppgtt_destroy_vma(vm);
 523
 524	GEM_BUG_ON(!list_empty(&vm->bound_list));
 525	GEM_BUG_ON(!list_empty(&vm->unbound_list));
 526
 527	vm->cleanup(vm);
 528	i915_address_space_fini(vm);
 529
 530	kfree(vm);
 531}
 532
 533void i915_vm_release(struct kref *kref)
 534{
 535	struct i915_address_space *vm =
 536		container_of(kref, struct i915_address_space, ref);
 537
 538	GEM_BUG_ON(i915_is_ggtt(vm));
 539	trace_i915_ppgtt_release(vm);
 540
 541	vm->closed = true;
 542	queue_rcu_work(vm->i915->wq, &vm->rcu);
 543}
 544
 545static void i915_address_space_init(struct i915_address_space *vm, int subclass)
 546{
 547	kref_init(&vm->ref);
 548	INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
 549
 550	/*
 551	 * The vm->mutex must be reclaim safe (for use in the shrinker).
 552	 * Do a dummy acquire now under fs_reclaim so that any allocation
 553	 * attempt holding the lock is immediately reported by lockdep.
 554	 */
 555	mutex_init(&vm->mutex);
 556	lockdep_set_subclass(&vm->mutex, subclass);
 557	i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
 558
 559	GEM_BUG_ON(!vm->total);
 560	drm_mm_init(&vm->mm, 0, vm->total);
 561	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
 562
 563	stash_init(&vm->free_pages);
 564
 565	INIT_LIST_HEAD(&vm->unbound_list);
 566	INIT_LIST_HEAD(&vm->bound_list);
 567}
 568
 569static int __setup_page_dma(struct i915_address_space *vm,
 570			    struct i915_page_dma *p,
 571			    gfp_t gfp)
 572{
 573	p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
 574	if (unlikely(!p->page))
 575		return -ENOMEM;
 576
 577	p->daddr = dma_map_page_attrs(vm->dma,
 578				      p->page, 0, PAGE_SIZE,
 579				      PCI_DMA_BIDIRECTIONAL,
 580				      DMA_ATTR_SKIP_CPU_SYNC |
 581				      DMA_ATTR_NO_WARN);
 582	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
 583		vm_free_page(vm, p->page);
 584		return -ENOMEM;
 585	}
 586
 587	return 0;
 588}
 589
 590static int setup_page_dma(struct i915_address_space *vm,
 591			  struct i915_page_dma *p)
 592{
 593	return __setup_page_dma(vm, p, __GFP_HIGHMEM);
 594}
 595
 596static void cleanup_page_dma(struct i915_address_space *vm,
 597			     struct i915_page_dma *p)
 598{
 599	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 600	vm_free_page(vm, p->page);
 601}
 602
 603#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
 604
 605static void
 606fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
 607{
 608	kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
 609}
 610
 611#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
 612#define fill32_px(px, v) do {						\
 613	u64 v__ = lower_32_bits(v);					\
 614	fill_px((px), v__ << 32 | v__);					\
 615} while (0)
 616
 617static int
 618setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 619{
 620	unsigned long size;
 621
 622	/*
 623	 * In order to utilize 64K pages for an object with a size < 2M, we will
 624	 * need to support a 64K scratch page, given that every 16th entry for a
 625	 * page-table operating in 64K mode must point to a properly aligned 64K
 626	 * region, including any PTEs which happen to point to scratch.
 627	 *
 628	 * This is only relevant for the 48b PPGTT where we support
 629	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
 630	 * scratch (read-only) between all vm, we create one 64k scratch page
 631	 * for all.
 632	 */
 633	size = I915_GTT_PAGE_SIZE_4K;
 634	if (i915_vm_is_4lvl(vm) &&
 635	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
 636		size = I915_GTT_PAGE_SIZE_64K;
 637		gfp |= __GFP_NOWARN;
 638	}
 639	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
 640
 641	do {
 642		unsigned int order = get_order(size);
 643		struct page *page;
 644		dma_addr_t addr;
 645
 646		page = alloc_pages(gfp, order);
 647		if (unlikely(!page))
 648			goto skip;
 649
 650		addr = dma_map_page_attrs(vm->dma,
 651					  page, 0, size,
 652					  PCI_DMA_BIDIRECTIONAL,
 653					  DMA_ATTR_SKIP_CPU_SYNC |
 654					  DMA_ATTR_NO_WARN);
 655		if (unlikely(dma_mapping_error(vm->dma, addr)))
 656			goto free_page;
 657
 658		if (unlikely(!IS_ALIGNED(addr, size)))
 659			goto unmap_page;
 660
 661		vm->scratch[0].base.page = page;
 662		vm->scratch[0].base.daddr = addr;
 663		vm->scratch_order = order;
 664		return 0;
 665
 666unmap_page:
 667		dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
 668free_page:
 669		__free_pages(page, order);
 670skip:
 671		if (size == I915_GTT_PAGE_SIZE_4K)
 672			return -ENOMEM;
 673
 674		size = I915_GTT_PAGE_SIZE_4K;
 675		gfp &= ~__GFP_NOWARN;
 676	} while (1);
 677}
 678
 679static void cleanup_scratch_page(struct i915_address_space *vm)
 680{
 681	struct i915_page_dma *p = px_base(&vm->scratch[0]);
 682	unsigned int order = vm->scratch_order;
 683
 684	dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
 685		       PCI_DMA_BIDIRECTIONAL);
 686	__free_pages(p->page, order);
 687}
 688
 689static void free_scratch(struct i915_address_space *vm)
 690{
 691	int i;
 692
 693	if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
 694		return;
 695
 696	for (i = 1; i <= vm->top; i++) {
 697		if (!px_dma(&vm->scratch[i]))
 698			break;
 699		cleanup_page_dma(vm, px_base(&vm->scratch[i]));
 700	}
 701
 702	cleanup_scratch_page(vm);
 703}
 704
 705static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
 706{
 707	struct i915_page_table *pt;
 708
 709	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
 710	if (unlikely(!pt))
 711		return ERR_PTR(-ENOMEM);
 712
 713	if (unlikely(setup_page_dma(vm, &pt->base))) {
 714		kfree(pt);
 715		return ERR_PTR(-ENOMEM);
 716	}
 717
 718	atomic_set(&pt->used, 0);
 719	return pt;
 720}
 721
 722static struct i915_page_directory *__alloc_pd(size_t sz)
 723{
 724	struct i915_page_directory *pd;
 725
 726	pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
 727	if (unlikely(!pd))
 728		return NULL;
 729
 730	spin_lock_init(&pd->lock);
 731	return pd;
 732}
 733
 734static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
 735{
 736	struct i915_page_directory *pd;
 737
 738	pd = __alloc_pd(sizeof(*pd));
 739	if (unlikely(!pd))
 740		return ERR_PTR(-ENOMEM);
 741
 742	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
 743		kfree(pd);
 744		return ERR_PTR(-ENOMEM);
 745	}
 746
 747	return pd;
 748}
 749
 750static void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
 751{
 752	cleanup_page_dma(vm, pd);
 753	kfree(pd);
 754}
 755
 756#define free_px(vm, px) free_pd(vm, px_base(px))
 757
 758static inline void
 759write_dma_entry(struct i915_page_dma * const pdma,
 760		const unsigned short idx,
 761		const u64 encoded_entry)
 762{
 763	u64 * const vaddr = kmap_atomic(pdma->page);
 764
 765	vaddr[idx] = encoded_entry;
 766	kunmap_atomic(vaddr);
 767}
 768
 769static inline void
 770__set_pd_entry(struct i915_page_directory * const pd,
 771	       const unsigned short idx,
 772	       struct i915_page_dma * const to,
 773	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
 774{
 775	/* Each thread pre-pins the pd, and we may have a thread per pde. */
 776	GEM_BUG_ON(atomic_read(px_used(pd)) > 2 * ARRAY_SIZE(pd->entry));
 777
 778	atomic_inc(px_used(pd));
 779	pd->entry[idx] = to;
 780	write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
 781}
 782
 783#define set_pd_entry(pd, idx, to) \
 784	__set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)
 785
 786static inline void
 787clear_pd_entry(struct i915_page_directory * const pd,
 788	       const unsigned short idx,
 789	       const struct i915_page_scratch * const scratch)
 790{
 791	GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
 792
 793	write_dma_entry(px_base(pd), idx, scratch->encode);
 794	pd->entry[idx] = NULL;
 795	atomic_dec(px_used(pd));
 796}
 797
 798static bool
 799release_pd_entry(struct i915_page_directory * const pd,
 800		 const unsigned short idx,
 801		 struct i915_page_table * const pt,
 802		 const struct i915_page_scratch * const scratch)
 803{
 804	bool free = false;
 805
 806	if (atomic_add_unless(&pt->used, -1, 1))
 807		return false;
 808
 809	spin_lock(&pd->lock);
 810	if (atomic_dec_and_test(&pt->used)) {
 811		clear_pd_entry(pd, idx, scratch);
 812		free = true;
 813	}
 814	spin_unlock(&pd->lock);
 815
 816	return free;
 817}
 818
 819/*
 820 * PDE TLBs are a pain to invalidate on GEN8+. When we modify
 821 * the page table structures, we mark them dirty so that
 822 * context switching/execlist queuing code takes extra steps
 823 * to ensure that tlbs are flushed.
 824 */
 825static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt)
 826{
 827	ppgtt->pd_dirty_engines = ALL_ENGINES;
 828}
 829
 830static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
 831{
 832	struct drm_i915_private *dev_priv = ppgtt->vm.i915;
 833	enum vgt_g2v_type msg;
 834	int i;
 835
 836	if (create)
 837		atomic_inc(px_used(ppgtt->pd)); /* never remove */
 838	else
 839		atomic_dec(px_used(ppgtt->pd));
 840
 841	mutex_lock(&dev_priv->vgpu.lock);
 842
 843	if (i915_vm_is_4lvl(&ppgtt->vm)) {
 844		const u64 daddr = px_dma(ppgtt->pd);
 845
 846		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
 847		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
 848
 849		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
 850				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
 851	} else {
 852		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
 853			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
 854
 855			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
 856			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
 857		}
 858
 859		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
 860				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
 861	}
 862
 863	/* g2v_notify atomically (via hv trap) consumes the message packet. */
 864	I915_WRITE(vgtif_reg(g2v_notify), msg);
 865
 866	mutex_unlock(&dev_priv->vgpu.lock);
 867}
 868
 869/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
 870#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
 871#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
 872#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
 873#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
 874#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
 875#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
 876#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
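/*
 * Worked example (illustrative only): with 4K pages each level of the
 * radix tree indexes 9 bits of the address, so for a GPU virtual address
 * of 2G (0x80000000):
 *
 *	__gen8_pte_index(addr, 0) = (addr >> 12) & 511 = 0	(PTE within the PT)
 *	__gen8_pte_index(addr, 1) = (addr >> 21) & 511 = 0	(PDE within the PD)
 *	__gen8_pte_index(addr, 2) = (addr >> 30) & 511 = 2	(PDPE within the PDP)
 *	__gen8_pte_index(addr, 3) = (addr >> 39) & 511 = 0	(PML4E, 4lvl only)
 *
 * i.e. the address falls under the third 1G page-directory-pointer entry.
 * gen8_pd_index() applies the same decomposition to an index that has
 * already been shifted down by GEN8_PTE_SHIFT.
 */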
 877
 878static inline unsigned int
 879gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
 880{
 881	const int shift = gen8_pd_shift(lvl);
 882	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
 883
 884	GEM_BUG_ON(start >= end);
 885	end += ~mask >> gen8_pd_shift(1);
 886
 887	*idx = i915_pde_index(start, shift);
 888	if ((start ^ end) & mask)
 889		return GEN8_PDES - *idx;
 890	else
 891		return i915_pde_index(end, shift) - *idx;
 892}
 893
 894static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
 895{
 896	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
 897
 898	GEM_BUG_ON(start >= end);
 899	return (start ^ end) & mask && (start & ~mask) == 0;
 900}
 901
 902static inline unsigned int gen8_pt_count(u64 start, u64 end)
 903{
 904	GEM_BUG_ON(start >= end);
 905	if ((start ^ end) >> gen8_pd_shift(1))
 906		return GEN8_PDES - (start & (GEN8_PDES - 1));
 907	else
 908		return end - start;
 909}
 910
 911static inline unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
 912{
 913	unsigned int shift = __gen8_pte_shift(vm->top);
 914	return (vm->total + (1ull << shift) - 1) >> shift;
 915}
 916
 917static inline struct i915_page_directory *
 918gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
 919{
 920	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
 921
 922	if (vm->top == 2)
 923		return ppgtt->pd;
 924	else
 925		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
 926}
 927
 928static inline struct i915_page_directory *
 929gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
 930{
 931	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
 932}
 933
 934static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
 935				 struct i915_page_directory *pd,
 936				 int count, int lvl)
 937{
 938	if (lvl) {
 939		void **pde = pd->entry;
 940
 941		do {
 942			if (!*pde)
 943				continue;
 944
 945			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
 946		} while (pde++, --count);
 947	}
 948
 949	free_px(vm, pd);
 950}
 951
 952static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 953{
 954	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 955
 956	if (intel_vgpu_active(vm->i915))
 957		gen8_ppgtt_notify_vgt(ppgtt, false);
 958
 959	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
 960	free_scratch(vm);
 961}
 962
 963static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
 964			      struct i915_page_directory * const pd,
 965			      u64 start, const u64 end, int lvl)
 966{
 967	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
 968	unsigned int idx, len;
 969
 970	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
 971
 972	len = gen8_pd_range(start, end, lvl--, &idx);
 973	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
 974	    __func__, vm, lvl + 1, start, end,
 975	    idx, len, atomic_read(px_used(pd)));
 976	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
 977
 978	do {
 979		struct i915_page_table *pt = pd->entry[idx];
 980
 981		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
 982		    gen8_pd_contains(start, end, lvl)) {
 983			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
 984			    __func__, vm, lvl + 1, idx, start, end);
 985			clear_pd_entry(pd, idx, scratch);
 986			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
 987			start += (u64)I915_PDES << gen8_pd_shift(lvl);
 988			continue;
 989		}
 990
 991		if (lvl) {
 992			start = __gen8_ppgtt_clear(vm, as_pd(pt),
 993						   start, end, lvl);
 994		} else {
 995			unsigned int count;
 996			u64 *vaddr;
 997
 998			count = gen8_pt_count(start, end);
 999			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
1000			    __func__, vm, lvl, start, end,
1001			    gen8_pd_index(start, 0), count,
1002			    atomic_read(&pt->used));
1003			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
1004
1005			vaddr = kmap_atomic_px(pt);
1006			memset64(vaddr + gen8_pd_index(start, 0),
1007				 vm->scratch[0].encode,
1008				 count);
1009			kunmap_atomic(vaddr);
1010
1011			atomic_sub(count, &pt->used);
1012			start += count;
1013		}
1014
1015		if (release_pd_entry(pd, idx, pt, scratch))
1016			free_px(vm, pt);
1017	} while (idx++, --len);
1018
1019	return start;
1020}
1021
1022static void gen8_ppgtt_clear(struct i915_address_space *vm,
1023			     u64 start, u64 length)
1024{
1025	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
1026	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
1027	GEM_BUG_ON(range_overflows(start, length, vm->total));
1028
1029	start >>= GEN8_PTE_SHIFT;
1030	length >>= GEN8_PTE_SHIFT;
1031	GEM_BUG_ON(length == 0);
1032
1033	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
1034			   start, start + length, vm->top);
1035}
1036
1037static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
1038			      struct i915_page_directory * const pd,
1039			      u64 * const start, const u64 end, int lvl)
1040{
1041	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
1042	struct i915_page_table *alloc = NULL;
1043	unsigned int idx, len;
1044	int ret = 0;
1045
1046	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
1047
1048	len = gen8_pd_range(*start, end, lvl--, &idx);
1049	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
1050	    __func__, vm, lvl + 1, *start, end,
1051	    idx, len, atomic_read(px_used(pd)));
1052	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
1053
1054	spin_lock(&pd->lock);
1055	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
1056	do {
1057		struct i915_page_table *pt = pd->entry[idx];
1058
1059		if (!pt) {
1060			spin_unlock(&pd->lock);
1061
1062			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
1063			    __func__, vm, lvl + 1, idx);
1064
1065			pt = fetch_and_zero(&alloc);
1066			if (lvl) {
1067				if (!pt) {
1068					pt = &alloc_pd(vm)->pt;
1069					if (IS_ERR(pt)) {
1070						ret = PTR_ERR(pt);
1071						goto out;
1072					}
1073				}
1074
1075				fill_px(pt, vm->scratch[lvl].encode);
1076			} else {
1077				if (!pt) {
1078					pt = alloc_pt(vm);
1079					if (IS_ERR(pt)) {
1080						ret = PTR_ERR(pt);
1081						goto out;
1082					}
1083				}
1084
1085				if (intel_vgpu_active(vm->i915) ||
1086				    gen8_pt_count(*start, end) < I915_PDES)
1087					fill_px(pt, vm->scratch[lvl].encode);
1088			}
1089
1090			spin_lock(&pd->lock);
1091			if (likely(!pd->entry[idx]))
1092				set_pd_entry(pd, idx, pt);
1093			else
1094				alloc = pt, pt = pd->entry[idx];
1095		}
1096
1097		if (lvl) {
1098			atomic_inc(&pt->used);
1099			spin_unlock(&pd->lock);
1100
1101			ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
1102						 start, end, lvl);
1103			if (unlikely(ret)) {
1104				if (release_pd_entry(pd, idx, pt, scratch))
1105					free_px(vm, pt);
1106				goto out;
1107			}
1108
1109			spin_lock(&pd->lock);
1110			atomic_dec(&pt->used);
1111			GEM_BUG_ON(!atomic_read(&pt->used));
1112		} else {
1113			unsigned int count = gen8_pt_count(*start, end);
1114
1115			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
1116			    __func__, vm, lvl, *start, end,
1117			    gen8_pd_index(*start, 0), count,
1118			    atomic_read(&pt->used));
1119
1120			atomic_add(count, &pt->used);
1121			/* All other pdes may be simultaneously removed */
1122			GEM_BUG_ON(atomic_read(&pt->used) > 2 * I915_PDES);
1123			*start += count;
1124		}
1125	} while (idx++, --len);
1126	spin_unlock(&pd->lock);
1127out:
1128	if (alloc)
1129		free_px(vm, alloc);
1130	return ret;
1131}
1132
1133static int gen8_ppgtt_alloc(struct i915_address_space *vm,
1134			    u64 start, u64 length)
1135{
1136	u64 from;
1137	int err;
1138
1139	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
1140	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
1141	GEM_BUG_ON(range_overflows(start, length, vm->total));
1142
1143	start >>= GEN8_PTE_SHIFT;
1144	length >>= GEN8_PTE_SHIFT;
1145	GEM_BUG_ON(length == 0);
1146	from = start;
1147
1148	err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
1149				 &start, start + length, vm->top);
1150	if (unlikely(err && from != start))
1151		__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
1152				   from, start, vm->top);
1153
1154	return err;
1155}
1156
1157static inline struct sgt_dma {
1158	struct scatterlist *sg;
1159	dma_addr_t dma, max;
1160} sgt_dma(struct i915_vma *vma) {
1161	struct scatterlist *sg = vma->pages->sgl;
1162	dma_addr_t addr = sg_dma_address(sg);
1163	return (struct sgt_dma) { sg, addr, addr + sg->length };
1164}
1165
1166static __always_inline u64
1167gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
1168		      struct i915_page_directory *pdp,
1169		      struct sgt_dma *iter,
1170		      u64 idx,
1171		      enum i915_cache_level cache_level,
1172		      u32 flags)
1173{
1174	struct i915_page_directory *pd;
1175	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
1176	gen8_pte_t *vaddr;
1177
1178	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
1179	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
1180	do {
1181		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
1182
1183		iter->dma += I915_GTT_PAGE_SIZE;
1184		if (iter->dma >= iter->max) {
1185			iter->sg = __sg_next(iter->sg);
1186			if (!iter->sg) {
1187				idx = 0;
1188				break;
1189			}
1190
1191			iter->dma = sg_dma_address(iter->sg);
1192			iter->max = iter->dma + iter->sg->length;
1193		}
1194
1195		if (gen8_pd_index(++idx, 0) == 0) {
1196			if (gen8_pd_index(idx, 1) == 0) {
1197				/* Limited by sg length for 3lvl */
1198				if (gen8_pd_index(idx, 2) == 0)
1199					break;
1200
1201				pd = pdp->entry[gen8_pd_index(idx, 2)];
1202			}
1203
1204			kunmap_atomic(vaddr);
1205			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
1206		}
1207	} while (1);
1208	kunmap_atomic(vaddr);
1209
1210	return idx;
1211}
1212
1213static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
1214				   struct sgt_dma *iter,
1215				   enum i915_cache_level cache_level,
1216				   u32 flags)
1217{
1218	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
1219	u64 start = vma->node.start;
1220	dma_addr_t rem = iter->sg->length;
1221
1222	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
1223
1224	do {
1225		struct i915_page_directory * const pdp =
1226			gen8_pdp_for_page_address(vma->vm, start);
1227		struct i915_page_directory * const pd =
1228			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
1229		gen8_pte_t encode = pte_encode;
1230		unsigned int maybe_64K = -1;
1231		unsigned int page_size;
1232		gen8_pte_t *vaddr;
1233		u16 index;
1234
1235		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
1236		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
1237		    rem >= I915_GTT_PAGE_SIZE_2M &&
1238		    !__gen8_pte_index(start, 0)) {
1239			index = __gen8_pte_index(start, 1);
1240			encode |= GEN8_PDE_PS_2M;
1241			page_size = I915_GTT_PAGE_SIZE_2M;
1242
1243			vaddr = kmap_atomic_px(pd);
1244		} else {
1245			struct i915_page_table *pt =
1246				i915_pt_entry(pd, __gen8_pte_index(start, 1));
1247
1248			index = __gen8_pte_index(start, 0);
1249			page_size = I915_GTT_PAGE_SIZE;
1250
1251			if (!index &&
1252			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
1253			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1254			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1255			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
1256				maybe_64K = __gen8_pte_index(start, 1);
1257
1258			vaddr = kmap_atomic_px(pt);
1259		}
1260
1261		do {
1262			GEM_BUG_ON(iter->sg->length < page_size);
1263			vaddr[index++] = encode | iter->dma;
1264
1265			start += page_size;
1266			iter->dma += page_size;
1267			rem -= page_size;
1268			if (iter->dma >= iter->max) {
1269				iter->sg = __sg_next(iter->sg);
1270				if (!iter->sg)
1271					break;
1272
1273				rem = iter->sg->length;
1274				iter->dma = sg_dma_address(iter->sg);
1275				iter->max = iter->dma + rem;
1276
1277				if (maybe_64K != -1 && index < I915_PDES &&
1278				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1279				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1280				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
1281					maybe_64K = -1;
1282
1283				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
1284					break;
1285			}
1286		} while (rem >= page_size && index < I915_PDES);
1287
1288		kunmap_atomic(vaddr);
1289
1290		/*
1291		 * Is it safe to mark the 2M block as 64K? -- Either we have
 1292		 * filled the whole page-table with 64K entries, or filled part of
1293		 * it and have reached the end of the sg table and we have
1294		 * enough padding.
1295		 */
1296		if (maybe_64K != -1 &&
1297		    (index == I915_PDES ||
1298		     (i915_vm_has_scratch_64K(vma->vm) &&
1299		      !iter->sg && IS_ALIGNED(vma->node.start +
1300					      vma->node.size,
1301					      I915_GTT_PAGE_SIZE_2M)))) {
1302			vaddr = kmap_atomic_px(pd);
1303			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
1304			kunmap_atomic(vaddr);
1305			page_size = I915_GTT_PAGE_SIZE_64K;
1306
1307			/*
1308			 * We write all 4K page entries, even when using 64K
1309			 * pages. In order to verify that the HW isn't cheating
1310			 * by using the 4K PTE instead of the 64K PTE, we want
1311			 * to remove all the surplus entries. If the HW skipped
1312			 * the 64K PTE, it will read/write into the scratch page
1313			 * instead - which we detect as missing results during
1314			 * selftests.
1315			 */
1316			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
1317				u16 i;
1318
1319				encode = vma->vm->scratch[0].encode;
1320				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
1321
1322				for (i = 1; i < index; i += 16)
1323					memset64(vaddr + i, encode, 15);
1324
1325				kunmap_atomic(vaddr);
1326			}
1327		}
1328
1329		vma->page_sizes.gtt |= page_size;
1330	} while (iter->sg);
1331}
1332
1333static void gen8_ppgtt_insert(struct i915_address_space *vm,
1334			      struct i915_vma *vma,
1335			      enum i915_cache_level cache_level,
1336			      u32 flags)
1337{
1338	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
1339	struct sgt_dma iter = sgt_dma(vma);
1340
1341	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
1342		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
1343	} else  {
1344		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
1345
1346		do {
1347			struct i915_page_directory * const pdp =
1348				gen8_pdp_for_page_index(vm, idx);
1349
1350			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
1351						    cache_level, flags);
1352		} while (idx);
1353
1354		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1355	}
1356}
1357
1358static int gen8_init_scratch(struct i915_address_space *vm)
1359{
1360	int ret;
1361	int i;
1362
1363	/*
 1364	 * If everybody agrees not to write into the scratch page,
1365	 * we can reuse it for all vm, keeping contexts and processes separate.
1366	 */
1367	if (vm->has_read_only &&
1368	    vm->i915->kernel_context &&
1369	    vm->i915->kernel_context->vm) {
1370		struct i915_address_space *clone = vm->i915->kernel_context->vm;
1371
1372		GEM_BUG_ON(!clone->has_read_only);
1373
1374		vm->scratch_order = clone->scratch_order;
1375		memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
1376		px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
1377		return 0;
1378	}
1379
1380	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1381	if (ret)
1382		return ret;
1383
1384	vm->scratch[0].encode =
1385		gen8_pte_encode(px_dma(&vm->scratch[0]),
1386				I915_CACHE_LLC, vm->has_read_only);
1387
1388	for (i = 1; i <= vm->top; i++) {
1389		if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
1390			goto free_scratch;
1391
1392		fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
1393		vm->scratch[i].encode =
1394			gen8_pde_encode(px_dma(&vm->scratch[i]),
1395					I915_CACHE_LLC);
1396	}
1397
1398	return 0;
1399
1400free_scratch:
1401	free_scratch(vm);
1402	return -ENOMEM;
1403}
1404
1405static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
1406{
1407	struct i915_address_space *vm = &ppgtt->vm;
1408	struct i915_page_directory *pd = ppgtt->pd;
1409	unsigned int idx;
1410
1411	GEM_BUG_ON(vm->top != 2);
1412	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
1413
1414	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
1415		struct i915_page_directory *pde;
1416
1417		pde = alloc_pd(vm);
1418		if (IS_ERR(pde))
1419			return PTR_ERR(pde);
1420
1421		fill_px(pde, vm->scratch[1].encode);
1422		set_pd_entry(pd, idx, pde);
1423		atomic_inc(px_used(pde)); /* keep pinned */
1424	}
1425
1426	return 0;
1427}
1428
1429static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
1430{
1431	struct drm_i915_private *i915 = gt->i915;
1432
1433	ppgtt->vm.gt = gt;
1434	ppgtt->vm.i915 = i915;
1435	ppgtt->vm.dma = &i915->drm.pdev->dev;
1436	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
1437
1438	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
1439
1440	ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
1441	ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
1442	ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
1443	ppgtt->vm.vma_ops.clear_pages = clear_pages;
1444}
1445
1446static struct i915_page_directory *
1447gen8_alloc_top_pd(struct i915_address_space *vm)
1448{
1449	const unsigned int count = gen8_pd_top_count(vm);
1450	struct i915_page_directory *pd;
1451
1452	GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
1453
1454	pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
1455	if (unlikely(!pd))
1456		return ERR_PTR(-ENOMEM);
1457
1458	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
1459		kfree(pd);
1460		return ERR_PTR(-ENOMEM);
1461	}
1462
1463	fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
1464	atomic_inc(px_used(pd)); /* mark as pinned */
1465	return pd;
1466}
1467
1468/*
 1469 * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
 1470 * registers, with a net effect resembling a 2-level page table in normal x86
 1471 * terms. Each PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of
 1472 * legacy 32b address space.
1473 *
1474 */
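/*
 * Worked sizes (illustrative, not authoritative): the 3-level layout above
 * covers 4 PDPEs x 512 PDEs x 512 PTEs x 4K = 4G, while the 4-level layout
 * selected below when i915_vm_is_4lvl() is true adds a 512-entry PML4 on
 * top, for 512 x 512 x 512 x 512 x 4K = 256T of address space.
 */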
1475static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
1476{
1477	struct i915_ppgtt *ppgtt;
1478	int err;
1479
1480	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1481	if (!ppgtt)
1482		return ERR_PTR(-ENOMEM);
1483
1484	ppgtt_init(ppgtt, &i915->gt);
1485	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
1486
1487	/*
1488	 * From bdw, there is hw support for read-only pages in the PPGTT.
1489	 *
1490	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
1491	 * for now.
1492	 */
1493	ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11;
1494
 1495	/* There are only a few exceptions for gen >= 6: chv and bxt.
 1496	 * And we are not sure about the latter, so play safe for now.
1497	 */
1498	if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
1499		ppgtt->vm.pt_kmap_wc = true;
1500
1501	err = gen8_init_scratch(&ppgtt->vm);
1502	if (err)
1503		goto err_free;
1504
1505	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
1506	if (IS_ERR(ppgtt->pd)) {
1507		err = PTR_ERR(ppgtt->pd);
1508		goto err_free_scratch;
1509	}
1510
1511	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
1512		if (intel_vgpu_active(i915)) {
1513			err = gen8_preallocate_top_level_pdp(ppgtt);
1514			if (err)
1515				goto err_free_pd;
1516		}
1517	}
1518
1519	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
1520	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
1521	ppgtt->vm.clear_range = gen8_ppgtt_clear;
1522
1523	if (intel_vgpu_active(i915))
1524		gen8_ppgtt_notify_vgt(ppgtt, true);
1525
1526	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
1527
1528	return ppgtt;
1529
1530err_free_pd:
1531	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
1532			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
1533err_free_scratch:
1534	free_scratch(&ppgtt->vm);
1535err_free:
1536	kfree(ppgtt);
1537	return ERR_PTR(err);
1538}
1539
1540/* Write pde (index) from the page directory @pd to the page table @pt */
1541static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
1542				  const unsigned int pde,
1543				  const struct i915_page_table *pt)
1544{
1545	/* Caller needs to make sure the write completes if necessary */
1546	iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1547		  ppgtt->pd_addr + pde);
1548}
1549
1550static void gen7_ppgtt_enable(struct intel_gt *gt)
1551{
1552	struct drm_i915_private *i915 = gt->i915;
1553	struct intel_uncore *uncore = gt->uncore;
1554	struct intel_engine_cs *engine;
1555	enum intel_engine_id id;
1556	u32 ecochk;
1557
1558	intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
1559
1560	ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
1561	if (IS_HASWELL(i915)) {
1562		ecochk |= ECOCHK_PPGTT_WB_HSW;
1563	} else {
1564		ecochk |= ECOCHK_PPGTT_LLC_IVB;
1565		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1566	}
1567	intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
1568
1569	for_each_engine(engine, i915, id) {
1570		/* GFX_MODE is per-ring on gen7+ */
1571		ENGINE_WRITE(engine,
1572			     RING_MODE_GEN7,
1573			     _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1574	}
1575}
1576
1577static void gen6_ppgtt_enable(struct intel_gt *gt)
1578{
1579	struct intel_uncore *uncore = gt->uncore;
1580
1581	intel_uncore_rmw(uncore,
1582			 GAC_ECO_BITS,
1583			 0,
1584			 ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
1585
1586	intel_uncore_rmw(uncore,
1587			 GAB_CTL,
1588			 0,
1589			 GAB_CTL_CONT_AFTER_PAGEFAULT);
1590
1591	intel_uncore_rmw(uncore,
1592			 GAM_ECOCHK,
1593			 0,
1594			 ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1595
1596	if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
1597		intel_uncore_write(uncore,
1598				   GFX_MODE,
1599				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1600}
1601
 1602/* PPGTT support for Sandybridge/Gen6 and later */
1603static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1604				   u64 start, u64 length)
1605{
1606	struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1607	const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
1608	const gen6_pte_t scratch_pte = vm->scratch[0].encode;
1609	unsigned int pde = first_entry / GEN6_PTES;
1610	unsigned int pte = first_entry % GEN6_PTES;
1611	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
1612
1613	while (num_entries) {
1614		struct i915_page_table * const pt =
1615			i915_pt_entry(ppgtt->base.pd, pde++);
1616		const unsigned int count = min(num_entries, GEN6_PTES - pte);
1617		gen6_pte_t *vaddr;
1618
1619		GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));
1620
1621		num_entries -= count;
1622
1623		GEM_BUG_ON(count > atomic_read(&pt->used));
1624		if (!atomic_sub_return(count, &pt->used))
1625			ppgtt->scan_for_unused_pt = true;
1626
1627		/*
1628		 * Note that the hw doesn't support removing PDE on the fly
1629		 * (they are cached inside the context with no means to
1630		 * invalidate the cache), so we can only reset the PTE
1631		 * entries back to scratch.
1632		 */
1633
1634		vaddr = kmap_atomic_px(pt);
1635		memset32(vaddr + pte, scratch_pte, count);
1636		kunmap_atomic(vaddr);
1637
1638		pte = 0;
1639	}
1640}
1641
1642static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1643				      struct i915_vma *vma,
1644				      enum i915_cache_level cache_level,
1645				      u32 flags)
1646{
1647	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1648	struct i915_page_directory * const pd = ppgtt->pd;
1649	unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
1650	unsigned act_pt = first_entry / GEN6_PTES;
1651	unsigned act_pte = first_entry % GEN6_PTES;
1652	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1653	struct sgt_dma iter = sgt_dma(vma);
1654	gen6_pte_t *vaddr;
1655
1656	GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);
1657
1658	vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
1659	do {
1660		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
1661
1662		iter.dma += I915_GTT_PAGE_SIZE;
1663		if (iter.dma == iter.max) {
1664			iter.sg = __sg_next(iter.sg);
1665			if (!iter.sg)
1666				break;
1667
1668			iter.dma = sg_dma_address(iter.sg);
1669			iter.max = iter.dma + iter.sg->length;
1670		}
1671
1672		if (++act_pte == GEN6_PTES) {
1673			kunmap_atomic(vaddr);
1674			vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
1675			act_pte = 0;
1676		}
1677	} while (1);
1678	kunmap_atomic(vaddr);
1679
1680	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1681}
1682
1683static int gen6_alloc_va_range(struct i915_address_space *vm,
1684			       u64 start, u64 length)
1685{
1686	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1687	struct i915_page_directory * const pd = ppgtt->base.pd;
1688	struct i915_page_table *pt, *alloc = NULL;
1689	intel_wakeref_t wakeref;
1690	u64 from = start;
1691	unsigned int pde;
1692	bool flush = false;
1693	int ret = 0;
1694
1695	wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
1696
1697	spin_lock(&pd->lock);
1698	gen6_for_each_pde(pt, pd, start, length, pde) {
1699		const unsigned int count = gen6_pte_count(start, length);
1700
1701		if (px_base(pt) == px_base(&vm->scratch[1])) {
1702			spin_unlock(&pd->lock);
1703
1704			pt = fetch_and_zero(&alloc);
1705			if (!pt)
1706				pt = alloc_pt(vm);
1707			if (IS_ERR(pt)) {
1708				ret = PTR_ERR(pt);
1709				goto unwind_out;
1710			}
1711
1712			fill32_px(pt, vm->scratch[0].encode);
1713
1714			spin_lock(&pd->lock);
1715			if (pd->entry[pde] == &vm->scratch[1]) {
1716				pd->entry[pde] = pt;
1717				if (i915_vma_is_bound(ppgtt->vma,
1718						      I915_VMA_GLOBAL_BIND)) {
1719					gen6_write_pde(ppgtt, pde, pt);
1720					flush = true;
1721				}
1722			} else {
1723				alloc = pt;
1724				pt = pd->entry[pde];
1725			}
1726		}
1727
1728		atomic_add(count, &pt->used);
1729	}
1730	spin_unlock(&pd->lock);
1731
1732	if (flush) {
1733		mark_tlbs_dirty(&ppgtt->base);
1734		gen6_ggtt_invalidate(vm->gt->ggtt);
1735	}
1736
1737	goto out;
1738
1739unwind_out:
1740	gen6_ppgtt_clear_range(vm, from, start - from);
1741out:
1742	if (alloc)
1743		free_px(vm, alloc);
1744	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
1745	return ret;
1746}
1747
1748static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
1749{
1750	struct i915_address_space * const vm = &ppgtt->base.vm;
1751	struct i915_page_directory * const pd = ppgtt->base.pd;
1752	int ret;
1753
1754	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1755	if (ret)
1756		return ret;
1757
1758	vm->scratch[0].encode =
1759		vm->pte_encode(px_dma(&vm->scratch[0]),
1760			       I915_CACHE_NONE, PTE_READ_ONLY);
1761
1762	if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) {
1763		cleanup_scratch_page(vm);
1764		return -ENOMEM;
1765	}
1766
1767	fill32_px(&vm->scratch[1], vm->scratch[0].encode);
1768	memset_p(pd->entry, &vm->scratch[1], I915_PDES);
1769
1770	return 0;
1771}
1772
1773static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
1774{
1775	struct i915_page_directory * const pd = ppgtt->base.pd;
1776	struct i915_page_dma * const scratch =
1777		px_base(&ppgtt->base.vm.scratch[1]);
1778	struct i915_page_table *pt;
1779	u32 pde;
1780
1781	gen6_for_all_pdes(pt, pd, pde)
1782		if (px_base(pt) != scratch)
1783			free_px(&ppgtt->base.vm, pt);
1784}
1785
1786static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1787{
1788	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1789	struct drm_i915_private *i915 = vm->i915;
1790
1791	/* FIXME remove the struct_mutex to bring the locking under control */
1792	mutex_lock(&i915->drm.struct_mutex);
1793	i915_vma_destroy(ppgtt->vma);
1794	mutex_unlock(&i915->drm.struct_mutex);
1795
1796	gen6_ppgtt_free_pd(ppgtt);
1797	free_scratch(vm);
1798	kfree(ppgtt->base.pd);
1799}
1800
1801static int pd_vma_set_pages(struct i915_vma *vma)
1802{
1803	vma->pages = ERR_PTR(-ENODEV);
1804	return 0;
1805}
1806
1807static void pd_vma_clear_pages(struct i915_vma *vma)
1808{
1809	GEM_BUG_ON(!vma->pages);
1810
1811	vma->pages = NULL;
1812}
1813
1814static int pd_vma_bind(struct i915_vma *vma,
1815		       enum i915_cache_level cache_level,
1816		       u32 unused)
1817{
1818	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
1819	struct gen6_ppgtt *ppgtt = vma->private;
1820	u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
1821	struct i915_page_table *pt;
1822	unsigned int pde;
1823
1824	px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
1825	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
1826
1827	gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
1828		gen6_write_pde(ppgtt, pde, pt);
1829
1830	mark_tlbs_dirty(&ppgtt->base);
1831	gen6_ggtt_invalidate(ggtt);
1832
1833	return 0;
1834}
1835
1836static void pd_vma_unbind(struct i915_vma *vma)
1837{
1838	struct gen6_ppgtt *ppgtt = vma->private;
1839	struct i915_page_directory * const pd = ppgtt->base.pd;
1840	struct i915_page_dma * const scratch =
1841		px_base(&ppgtt->base.vm.scratch[1]);
1842	struct i915_page_table *pt;
1843	unsigned int pde;
1844
1845	if (!ppgtt->scan_for_unused_pt)
1846		return;
1847
1848	/* Free all no longer used page tables */
1849	gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
1850		if (px_base(pt) == scratch || atomic_read(&pt->used))
1851			continue;
1852
1853		free_px(&ppgtt->base.vm, pt);
1854		pd->entry[pde] = scratch;
1855	}
1856
1857	ppgtt->scan_for_unused_pt = false;
1858}
1859
1860static const struct i915_vma_ops pd_vma_ops = {
1861	.set_pages = pd_vma_set_pages,
1862	.clear_pages = pd_vma_clear_pages,
1863	.bind_vma = pd_vma_bind,
1864	.unbind_vma = pd_vma_unbind,
1865};
1866
1867static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
1868{
1869	struct drm_i915_private *i915 = ppgtt->base.vm.i915;
1870	struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
1871	struct i915_vma *vma;
1872
1873	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
1874	GEM_BUG_ON(size > ggtt->vm.total);
1875
1876	vma = i915_vma_alloc();
1877	if (!vma)
1878		return ERR_PTR(-ENOMEM);
1879
1880	i915_active_init(i915, &vma->active, NULL, NULL);
1881
1882	vma->vm = &ggtt->vm;
1883	vma->ops = &pd_vma_ops;
1884	vma->private = ppgtt;
1885
1886	vma->size = size;
1887	vma->fence_size = size;
1888	vma->flags = I915_VMA_GGTT;
1889	vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
1890
1891	INIT_LIST_HEAD(&vma->obj_link);
1892	INIT_LIST_HEAD(&vma->closed_link);
1893
1894	mutex_lock(&vma->vm->mutex);
1895	list_add(&vma->vm_link, &vma->vm->unbound_list);
1896	mutex_unlock(&vma->vm->mutex);
1897
1898	return vma;
1899}
1900
1901int gen6_ppgtt_pin(struct i915_ppgtt *base)
1902{
1903	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
1904	int err;
1905
1906	GEM_BUG_ON(ppgtt->base.vm.closed);
1907
1908	/*
1909	 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
1910	 * which will be pinned into every active context.
1911	 * (When vma->pin_count becomes atomic, I expect we will naturally
1912	 * need a larger, unpacked, type and kill this redundancy.)
1913	 */
1914	if (ppgtt->pin_count++)
1915		return 0;
1916
1917	/*
1918	 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
1919	 * allocator works in address space sizes, so it's multiplied by page
1920	 * size. We allocate at the top of the GTT to avoid fragmentation.
1921	 */
1922	err = i915_vma_pin(ppgtt->vma,
1923			   0, GEN6_PD_ALIGN,
1924			   PIN_GLOBAL | PIN_HIGH);
1925	if (err)
1926		goto unpin;
1927
1928	return 0;
1929
1930unpin:
1931	ppgtt->pin_count = 0;
1932	return err;
1933}
1934
1935void gen6_ppgtt_unpin(struct i915_ppgtt *base)
1936{
1937	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
1938
1939	GEM_BUG_ON(!ppgtt->pin_count);
1940	if (--ppgtt->pin_count)
1941		return;
1942
1943	i915_vma_unpin(ppgtt->vma);
1944}
1945
1946void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
1947{
1948	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
1949
1950	if (!ppgtt->pin_count)
1951		return;
1952
1953	ppgtt->pin_count = 0;
1954	i915_vma_unpin(ppgtt->vma);
1955}
1956
1957static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
1958{
1959	struct i915_ggtt * const ggtt = &i915->ggtt;
1960	struct gen6_ppgtt *ppgtt;
1961	int err;
1962
1963	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1964	if (!ppgtt)
1965		return ERR_PTR(-ENOMEM);
1966
1967	ppgtt_init(&ppgtt->base, &i915->gt);
1968	ppgtt->base.vm.top = 1;
1969
1970	ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
1971	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
1972	ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
1973	ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
1974
1975	ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
1976
1977	ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
1978	if (!ppgtt->base.pd) {
1979		err = -ENOMEM;
1980		goto err_free;
1981	}
1982
1983	err = gen6_ppgtt_init_scratch(ppgtt);
1984	if (err)
1985		goto err_pd;
1986
1987	ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
1988	if (IS_ERR(ppgtt->vma)) {
1989		err = PTR_ERR(ppgtt->vma);
1990		goto err_scratch;
1991	}
1992
1993	return &ppgtt->base;
1994
1995err_scratch:
1996	free_scratch(&ppgtt->base.vm);
1997err_pd:
1998	kfree(ppgtt->base.pd);
1999err_free:
2000	kfree(ppgtt);
2001	return ERR_PTR(err);
2002}
2003
2004static void gtt_write_workarounds(struct intel_gt *gt)
2005{
2006	struct drm_i915_private *i915 = gt->i915;
2007	struct intel_uncore *uncore = gt->uncore;
2008
2009	/* This function is for GTT-related workarounds. It is called on
2010	 * driver load and after a GPU reset, so workarounds placed here
2011	 * are reapplied even if a GPU reset overwrites them.
2012	 */
2013	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
2014	if (IS_BROADWELL(i915))
2015		intel_uncore_write(uncore,
2016				   GEN8_L3_LRA_1_GPGPU,
2017				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
2018	else if (IS_CHERRYVIEW(i915))
2019		intel_uncore_write(uncore,
2020				   GEN8_L3_LRA_1_GPGPU,
2021				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
2022	else if (IS_GEN9_LP(i915))
2023		intel_uncore_write(uncore,
2024				   GEN8_L3_LRA_1_GPGPU,
2025				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2026	else if (INTEL_GEN(i915) >= 9)
2027		intel_uncore_write(uncore,
2028				   GEN8_L3_LRA_1_GPGPU,
2029				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
2030
2031	/*
2032	 * To support 64K PTEs we need to first enable the use of the
2033	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
2034	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
2035	 * shouldn't be needed after GEN10.
2036	 *
2037	 * 64K pages were first introduced from BDW+, although technically they
2038	 * only *work* from gen9+. For pre-BDW we instead have the option for
2039	 * 32K pages, but we don't currently have any support for it in our
2040	 * driver.
2041	 */
2042	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
2043	    INTEL_GEN(i915) <= 10)
2044		intel_uncore_rmw(uncore,
2045				 GEN8_GAMW_ECO_DEV_RW_IA,
2046				 0,
2047				 GAMW_ECO_ENABLE_64K_IPS_FIELD);
2048
2049	if (IS_GEN_RANGE(i915, 8, 11)) {
2050		bool can_use_gtt_cache = true;
2051
2052		/*
2053		 * According to the BSpec if we use 2M/1G pages then we also
2054		 * need to disable the GTT cache. At least on BDW we can see
2055		 * visual corruption when using 2M pages, and not disabling the
2056		 * GTT cache.
2057		 */
2058		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
2059			can_use_gtt_cache = false;
2060
2061		/* WaGttCachingOffByDefault */
2062		intel_uncore_write(uncore,
2063				   HSW_GTT_CACHE_EN,
2064				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
2065		WARN_ON_ONCE(can_use_gtt_cache &&
2066			     intel_uncore_read(uncore,
2067					       HSW_GTT_CACHE_EN) == 0);
2068	}
2069}
2070
2071int i915_ppgtt_init_hw(struct intel_gt *gt)
2072{
2073	struct drm_i915_private *i915 = gt->i915;
2074
2075	gtt_write_workarounds(gt);
2076
2077	if (IS_GEN(i915, 6))
2078		gen6_ppgtt_enable(gt);
2079	else if (IS_GEN(i915, 7))
2080		gen7_ppgtt_enable(gt);
2081
2082	return 0;
2083}
2084
2085static struct i915_ppgtt *
2086__ppgtt_create(struct drm_i915_private *i915)
2087{
2088	if (INTEL_GEN(i915) < 8)
2089		return gen6_ppgtt_create(i915);
2090	else
2091		return gen8_ppgtt_create(i915);
2092}
2093
2094struct i915_ppgtt *
2095i915_ppgtt_create(struct drm_i915_private *i915)
2096{
2097	struct i915_ppgtt *ppgtt;
2098
2099	ppgtt = __ppgtt_create(i915);
2100	if (IS_ERR(ppgtt))
2101		return ppgtt;
2102
2103	trace_i915_ppgtt_create(&ppgtt->vm);
2104
2105	return ppgtt;
2106}
2107
2108/* Certain Gen5 chipsets require idling the GPU before
2109 * unmapping anything from the GTT when VT-d is enabled.
2110 */
2111static bool needs_idle_maps(struct drm_i915_private *dev_priv)
2112{
2113	/* Query intel_iommu to see if we need the workaround. Presumably that
2114	 * was loaded first.
2115	 */
2116	return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
2117}
2118
2119static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
2120{
2121	struct drm_i915_private *i915 = ggtt->vm.i915;
2122
2123	/* Don't bother messing with faults pre GEN6 as we have little
2124	 * documentation supporting that it's a good idea.
2125	 */
2126	if (INTEL_GEN(i915) < 6)
2127		return;
2128
2129	intel_gt_check_and_clear_faults(ggtt->vm.gt);
2130
2131	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
2132
2133	ggtt->invalidate(ggtt);
2134}
2135
2136void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
2137{
2138	ggtt_suspend_mappings(&i915->ggtt);
2139}
2140
2141int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2142			       struct sg_table *pages)
2143{
2144	do {
2145		if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
2146				     pages->sgl, pages->nents,
2147				     PCI_DMA_BIDIRECTIONAL,
2148				     DMA_ATTR_NO_WARN))
2149			return 0;
2150
2151		/*
2152		 * If the DMA remap fails, one cause can be that we have
2153		 * too many objects pinned in a small remapping table,
2154		 * such as swiotlb. Incrementally purge all other objects and
2155		 * try again - if there are no more pages to remove from
2156		 * the DMA remapper, i915_gem_shrink will return 0.
2157		 */
2158		GEM_BUG_ON(obj->mm.pages == pages);
2159	} while (i915_gem_shrink(to_i915(obj->base.dev),
2160				 obj->base.size >> PAGE_SHIFT, NULL,
2161				 I915_SHRINK_BOUND |
2162				 I915_SHRINK_UNBOUND));
2163
2164	return -ENOSPC;
2165}
2166
2167static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2168{
2169	writeq(pte, addr);
2170}
2171
2172static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2173				  dma_addr_t addr,
2174				  u64 offset,
2175				  enum i915_cache_level level,
2176				  u32 unused)
2177{
2178	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2179	gen8_pte_t __iomem *pte =
2180		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
2181
2182	gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
2183
2184	ggtt->invalidate(ggtt);
2185}
2186
2187static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2188				     struct i915_vma *vma,
2189				     enum i915_cache_level level,
2190				     u32 flags)
2191{
2192	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2193	struct sgt_iter sgt_iter;
2194	gen8_pte_t __iomem *gtt_entries;
2195	const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
2196	dma_addr_t addr;
2197
2198	/*
2199	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
2200	 * not to allow the user to override access to a read only page.
2201	 */
2202
2203	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
2204	gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
2205	for_each_sgt_dma(addr, sgt_iter, vma->pages)
2206		gen8_set_pte(gtt_entries++, pte_encode | addr);
2207
2208	/*
2209	 * We want to flush the TLBs only after we're certain all the PTE
2210	 * updates have finished.
2211	 */
2212	ggtt->invalidate(ggtt);
2213}
2214
2215static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2216				  dma_addr_t addr,
2217				  u64 offset,
2218				  enum i915_cache_level level,
2219				  u32 flags)
2220{
2221	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2222	gen6_pte_t __iomem *pte =
2223		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
2224
2225	iowrite32(vm->pte_encode(addr, level, flags), pte);
2226
2227	ggtt->invalidate(ggtt);
2228}
2229
2230/*
2231 * Binds an object into the global gtt with the specified cache level. The object
2232 * will be accessible to the GPU via commands whose operands reference offsets
2233 * within the global GTT as well as accessible by the GPU through the GMADR
2234 * mapped BAR (dev_priv->mm.gtt->gtt).
2235 */
2236static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2237				     struct i915_vma *vma,
2238				     enum i915_cache_level level,
2239				     u32 flags)
2240{
2241	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2242	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
2243	unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
2244	struct sgt_iter iter;
2245	dma_addr_t addr;
2246	for_each_sgt_dma(addr, iter, vma->pages)
2247		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2248
2249	/*
2250	 * We want to flush the TLBs only after we're certain all the PTE
2251	 * updates have finished.
2252	 */
2253	ggtt->invalidate(ggtt);
2254}
2255
2256static void nop_clear_range(struct i915_address_space *vm,
2257			    u64 start, u64 length)
2258{
2259}
2260
2261static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2262				  u64 start, u64 length)
2263{
2264	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2265	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2266	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2267	const gen8_pte_t scratch_pte = vm->scratch[0].encode;
2268	gen8_pte_t __iomem *gtt_base =
2269		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2270	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2271	int i;
2272
2273	if (WARN(num_entries > max_entries,
2274		 "First entry = %d; Num entries = %d (max=%d)\n",
2275		 first_entry, num_entries, max_entries))
2276		num_entries = max_entries;
2277
2278	for (i = 0; i < num_entries; i++)
2279		gen8_set_pte(&gtt_base[i], scratch_pte);
2280}
2281
2282static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2283{
2284	struct drm_i915_private *dev_priv = vm->i915;
2285
2286	/*
2287	 * Make sure the internal GAM fifo has been cleared of all GTT
2288	 * writes before exiting stop_machine(). This guarantees that
2289	 * any aperture accesses waiting to start in another process
2290	 * cannot back up behind the GTT writes causing a hang.
2291	 * The register can be any arbitrary GAM register.
2292	 */
2293	POSTING_READ(GFX_FLSH_CNTL_GEN6);
2294}
2295
2296struct insert_page {
2297	struct i915_address_space *vm;
2298	dma_addr_t addr;
2299	u64 offset;
2300	enum i915_cache_level level;
2301};
2302
2303static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2304{
2305	struct insert_page *arg = _arg;
2306
2307	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2308	bxt_vtd_ggtt_wa(arg->vm);
2309
2310	return 0;
2311}
2312
2313static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2314					  dma_addr_t addr,
2315					  u64 offset,
2316					  enum i915_cache_level level,
2317					  u32 unused)
2318{
2319	struct insert_page arg = { vm, addr, offset, level };
2320
2321	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2322}
2323
2324struct insert_entries {
2325	struct i915_address_space *vm;
2326	struct i915_vma *vma;
2327	enum i915_cache_level level;
2328	u32 flags;
2329};
2330
2331static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2332{
2333	struct insert_entries *arg = _arg;
2334
2335	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
2336	bxt_vtd_ggtt_wa(arg->vm);
2337
2338	return 0;
2339}
2340
2341static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2342					     struct i915_vma *vma,
2343					     enum i915_cache_level level,
2344					     u32 flags)
2345{
2346	struct insert_entries arg = { vm, vma, level, flags };
2347
2348	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2349}
2350
2351struct clear_range {
2352	struct i915_address_space *vm;
2353	u64 start;
2354	u64 length;
2355};
2356
2357static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2358{
2359	struct clear_range *arg = _arg;
2360
2361	gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2362	bxt_vtd_ggtt_wa(arg->vm);
2363
2364	return 0;
2365}
2366
2367static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2368					  u64 start,
2369					  u64 length)
2370{
2371	struct clear_range arg = { vm, start, length };
2372
2373	stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2374}
2375
2376static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2377				  u64 start, u64 length)
2378{
2379	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2380	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2381	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2382	gen6_pte_t scratch_pte, __iomem *gtt_base =
2383		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2384	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2385	int i;
2386
2387	if (WARN(num_entries > max_entries,
2388		 "First entry = %d; Num entries = %d (max=%d)\n",
2389		 first_entry, num_entries, max_entries))
2390		num_entries = max_entries;
2391
2392	scratch_pte = vm->scratch[0].encode;
2393	for (i = 0; i < num_entries; i++)
2394		iowrite32(scratch_pte, &gtt_base[i]);
2395}
2396
2397static void i915_ggtt_insert_page(struct i915_address_space *vm,
2398				  dma_addr_t addr,
2399				  u64 offset,
2400				  enum i915_cache_level cache_level,
2401				  u32 unused)
2402{
2403	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2404		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2405
2406	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
2407}
2408
2409static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2410				     struct i915_vma *vma,
2411				     enum i915_cache_level cache_level,
2412				     u32 unused)
2413{
2414	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2415		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2416
2417	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2418				    flags);
2419}
2420
2421static void i915_ggtt_clear_range(struct i915_address_space *vm,
2422				  u64 start, u64 length)
2423{
2424	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
2425}
2426
2427static int ggtt_bind_vma(struct i915_vma *vma,
2428			 enum i915_cache_level cache_level,
2429			 u32 flags)
2430{
2431	struct drm_i915_private *i915 = vma->vm->i915;
2432	struct drm_i915_gem_object *obj = vma->obj;
2433	intel_wakeref_t wakeref;
2434	u32 pte_flags;
2435
2436	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
2437	pte_flags = 0;
2438	if (i915_gem_object_is_readonly(obj))
2439		pte_flags |= PTE_READ_ONLY;
2440
2441	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2442		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
2443
2444	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
2445
2446	/*
2447	 * Without aliasing PPGTT there's no difference between
2448	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2449	 * upgrade to both bound if we bind either to avoid double-binding.
2450	 */
2451	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
2452
2453	return 0;
2454}
2455
2456static void ggtt_unbind_vma(struct i915_vma *vma)
2457{
2458	struct drm_i915_private *i915 = vma->vm->i915;
2459	intel_wakeref_t wakeref;
2460
2461	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2462		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2463}
2464
2465static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2466				 enum i915_cache_level cache_level,
2467				 u32 flags)
2468{
2469	struct drm_i915_private *i915 = vma->vm->i915;
2470	u32 pte_flags;
2471	int ret;
2472
2473	/* Currently applicable only to VLV */
2474	pte_flags = 0;
2475	if (i915_gem_object_is_readonly(vma->obj))
2476		pte_flags |= PTE_READ_ONLY;
2477
2478	if (flags & I915_VMA_LOCAL_BIND) {
2479		struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
2480
2481		if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
2482			ret = alias->vm.allocate_va_range(&alias->vm,
2483							  vma->node.start,
2484							  vma->size);
2485			if (ret)
2486				return ret;
2487		}
2488
2489		alias->vm.insert_entries(&alias->vm, vma,
2490					 cache_level, pte_flags);
2491	}
2492
2493	if (flags & I915_VMA_GLOBAL_BIND) {
2494		intel_wakeref_t wakeref;
2495
2496		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
2497			vma->vm->insert_entries(vma->vm, vma,
2498						cache_level, pte_flags);
2499		}
2500	}
2501
2502	return 0;
2503}
2504
2505static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
2506{
2507	struct drm_i915_private *i915 = vma->vm->i915;
2508
2509	if (vma->flags & I915_VMA_GLOBAL_BIND) {
2510		struct i915_address_space *vm = vma->vm;
2511		intel_wakeref_t wakeref;
2512
2513		with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2514			vm->clear_range(vm, vma->node.start, vma->size);
2515	}
2516
2517	if (vma->flags & I915_VMA_LOCAL_BIND) {
2518		struct i915_address_space *vm =
2519			&i915_vm_to_ggtt(vma->vm)->alias->vm;
2520
2521		vm->clear_range(vm, vma->node.start, vma->size);
2522	}
2523}
2524
2525void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2526			       struct sg_table *pages)
2527{
2528	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2529	struct device *kdev = &dev_priv->drm.pdev->dev;
2530	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2531
2532	if (unlikely(ggtt->do_idle_maps)) {
2533		if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
2534			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2535			/* Wait a bit, in hopes it avoids the hang */
2536			udelay(10);
2537		}
2538	}
2539
2540	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
2541}
2542
2543static int ggtt_set_pages(struct i915_vma *vma)
2544{
2545	int ret;
2546
2547	GEM_BUG_ON(vma->pages);
2548
2549	ret = i915_get_ggtt_vma_pages(vma);
2550	if (ret)
2551		return ret;
2552
2553	vma->page_sizes = vma->obj->mm.page_sizes;
2554
2555	return 0;
2556}
2557
2558static void i915_gtt_color_adjust(const struct drm_mm_node *node,
2559				  unsigned long color,
2560				  u64 *start,
2561				  u64 *end)
2562{
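	/*
	 * Note: @node is the node immediately preceding the hole under
	 * consideration; if its colour differs from the colour being
	 * inserted, shrink the hole by a page at each mismatching end so
	 * a scratch-backed guard page separates the two colours.
	 */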
2563	if (node->allocated && node->color != color)
2564		*start += I915_GTT_PAGE_SIZE;
2565
2566	/* Also leave a space between the unallocated reserved node after the
2567	 * GTT and any objects within the GTT, i.e. we use the color adjustment
2568	 * to insert a guard page to prevent prefetches crossing over the
2569	 * GTT boundary.
2570	 */
2571	node = list_next_entry(node, node_list);
2572	if (node->color != color)
2573		*end -= I915_GTT_PAGE_SIZE;
2574}
2575
2576static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
2577{
2578	struct i915_ppgtt *ppgtt;
2579	int err;
2580
2581	ppgtt = i915_ppgtt_create(ggtt->vm.i915);
2582	if (IS_ERR(ppgtt))
2583		return PTR_ERR(ppgtt);
2584
2585	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
2586		err = -ENODEV;
2587		goto err_ppgtt;
2588	}
2589
2590	/*
2591	 * Note we only pre-allocate as far as the end of the global
2592	 * GTT. On 48b / 4-level page-tables, the difference is very,
2593	 * very significant! We have to preallocate as GVT/vgpu does
2594	 * not like the page directory disappearing.
2595	 */
2596	err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
2597	if (err)
2598		goto err_ppgtt;
2599
2600	ggtt->alias = ppgtt;
2601
2602	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
2603	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
2604
2605	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
2606	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
2607
2608	return 0;
2609
2610err_ppgtt:
2611	i915_vm_put(&ppgtt->vm);
2612	return err;
2613}
2614
2615static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
2616{
2617	struct drm_i915_private *i915 = ggtt->vm.i915;
2618	struct i915_ppgtt *ppgtt;
2619
2620	mutex_lock(&i915->drm.struct_mutex);
2621
2622	ppgtt = fetch_and_zero(&ggtt->alias);
2623	if (!ppgtt)
2624		goto out;
2625
2626	i915_vm_put(&ppgtt->vm);
2627
2628	ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
2629	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
2630
2631out:
2632	mutex_unlock(&i915->drm.struct_mutex);
2633}
2634
2635static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
2636{
2637	u64 size;
2638	int ret;
2639
2640	if (!USES_GUC(ggtt->vm.i915))
2641		return 0;
2642
2643	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
2644	size = ggtt->vm.total - GUC_GGTT_TOP;
2645
2646	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
2647				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
2648				   PIN_NOEVICT);
2649	if (ret)
2650		DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
2651
2652	return ret;
2653}
2654
2655static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
2656{
2657	if (drm_mm_node_allocated(&ggtt->uc_fw))
2658		drm_mm_remove_node(&ggtt->uc_fw);
2659}
2660
2661static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
2662{
2663	ggtt_release_guc_top(ggtt);
2664	drm_mm_remove_node(&ggtt->error_capture);
2665}
2666
2667static int init_ggtt(struct i915_ggtt *ggtt)
2668{
2669	/* Let GEM manage all of the aperture.
2670	 *
2671	 * However, leave one page at the end still bound to the scratch page.
2672	 * There are a number of places where the hardware apparently prefetches
2673	 * past the end of the object, and we've seen multiple hangs with the
2674	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2675	 * aperture.  One page should be enough to keep any prefetching inside
2676	 * of the aperture.
2677	 */
2678	unsigned long hole_start, hole_end;
2679	struct drm_mm_node *entry;
2680	int ret;
2681
2682	/*
2683	 * GuC requires all resources that we're sharing with it to be placed in
2684	 * non-WOPCM memory. If GuC is not present or not in use we still need a
2685	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
2686	 * why.
2687	 */
2688	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
2689			       intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
2690
2691	ret = intel_vgt_balloon(ggtt);
2692	if (ret)
2693		return ret;
2694
2695	/* Reserve a mappable slot for our lockless error capture */
2696	ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
2697					  PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2698					  0, ggtt->mappable_end,
2699					  DRM_MM_INSERT_LOW);
2700	if (ret)
2701		return ret;
2702
2703	/*
2704	 * The upper portion of the GuC address space has a sizeable hole
2705	 * (several MB) that is inaccessible by GuC. Reserve this range within
2706	 * GGTT as it can comfortably hold GuC/HuC firmware images.
2707	 */
2708	ret = ggtt_reserve_guc_top(ggtt);
2709	if (ret)
2710		goto err;
2711
2712	/* Clear any non-preallocated blocks */
2713	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
2714		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2715			      hole_start, hole_end);
2716		ggtt->vm.clear_range(&ggtt->vm, hole_start,
2717				     hole_end - hole_start);
2718	}
2719
2720	/* And finally clear the reserved guard page */
2721	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
2722
2723	return 0;
2724
2725err:
2726	cleanup_init_ggtt(ggtt);
2727	return ret;
2728}
2729
2730int i915_init_ggtt(struct drm_i915_private *i915)
2731{
2732	int ret;
2733
2734	ret = init_ggtt(&i915->ggtt);
2735	if (ret)
2736		return ret;
2737
2738	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
2739		ret = init_aliasing_ppgtt(&i915->ggtt);
2740		if (ret)
2741			cleanup_init_ggtt(&i915->ggtt);
2742	}
2743
2744	return 0;
2745}
2746
2747static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
2748{
2749	struct drm_i915_private *i915 = ggtt->vm.i915;
2750	struct i915_vma *vma, *vn;
2751
2752	ggtt->vm.closed = true;
2753
2754	rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
2755	flush_workqueue(i915->wq);
2756
2757	mutex_lock(&i915->drm.struct_mutex);
2758
2759	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
2760		WARN_ON(i915_vma_unbind(vma));
2761
2762	if (drm_mm_node_allocated(&ggtt->error_capture))
2763		drm_mm_remove_node(&ggtt->error_capture);
2764
2765	ggtt_release_guc_top(ggtt);
2766
2767	if (drm_mm_initialized(&ggtt->vm.mm)) {
2768		intel_vgt_deballoon(ggtt);
2769		i915_address_space_fini(&ggtt->vm);
2770	}
2771
2772	ggtt->vm.cleanup(&ggtt->vm);
2773
2774	mutex_unlock(&i915->drm.struct_mutex);
2775
2776	arch_phys_wc_del(ggtt->mtrr);
2777	io_mapping_fini(&ggtt->iomap);
2778}
2779
2780/**
2781 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
2782 * @i915: i915 device
2783 */
2784void i915_ggtt_driver_release(struct drm_i915_private *i915)
2785{
2786	struct pagevec *pvec;
2787
2788	fini_aliasing_ppgtt(&i915->ggtt);
2789
2790	ggtt_cleanup_hw(&i915->ggtt);
2791
2792	pvec = &i915->mm.wc_stash.pvec;
2793	if (pvec->nr) {
2794		set_pages_array_wb(pvec->pages, pvec->nr);
2795		__pagevec_release(pvec);
2796	}
2797
2798	i915_gem_cleanup_stolen(i915);
2799}
2800
2801static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2802{
2803	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2804	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2805	return snb_gmch_ctl << 20;
2806}
2807
2808static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2809{
2810	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2811	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2812	if (bdw_gmch_ctl)
2813		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2814
2815#ifdef CONFIG_X86_32
2816	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
2817	if (bdw_gmch_ctl > 4)
2818		bdw_gmch_ctl = 4;
2819#endif
2820
2821	return bdw_gmch_ctl << 20;
2822}
2823
2824static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
2825{
2826	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2827	gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2828
2829	if (gmch_ctrl)
2830		return 1 << (20 + gmch_ctrl);
2831
2832	return 0;
2833}
2834
2835static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
2836{
2837	struct drm_i915_private *dev_priv = ggtt->vm.i915;
2838	struct pci_dev *pdev = dev_priv->drm.pdev;
2839	phys_addr_t phys_addr;
2840	int ret;
2841
2842	/* For Modern GENs the PTEs and register space are split in the BAR */
2843	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
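	/*
	 * That is, the GSM (the array of GGTT PTEs) occupies the upper
	 * half of BAR 0, immediately after the register space.
	 */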
2844
2845	/*
2846	 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
2847	 * will be dropped. For WC mappings in general we have 64 byte burst
2848	 * writes when the WC buffer is flushed, so we can't use it, but have to
2849	 * resort to an uncached mapping. The WC issue is easily caught by the
2850	 * readback check when writing GTT PTE entries.
2851	 */
2852	if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
2853		ggtt->gsm = ioremap_nocache(phys_addr, size);
2854	else
2855		ggtt->gsm = ioremap_wc(phys_addr, size);
2856	if (!ggtt->gsm) {
2857		DRM_ERROR("Failed to map the ggtt page table\n");
2858		return -ENOMEM;
2859	}
2860
2861	ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
2862	if (ret) {
2863		DRM_ERROR("Scratch setup failed\n");
2864		/* iounmap will also get called at remove, but meh */
2865		iounmap(ggtt->gsm);
2866		return ret;
2867	}
2868
2869	ggtt->vm.scratch[0].encode =
2870		ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
2871				    I915_CACHE_NONE, 0);
2872
2873	return 0;
2874}
2875
2876static void tgl_setup_private_ppat(struct drm_i915_private *dev_priv)
2877{
2878	/* TGL doesn't support LLC or AGE settings */
2879	I915_WRITE(GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
2880	I915_WRITE(GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
2881	I915_WRITE(GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
2882	I915_WRITE(GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
2883	I915_WRITE(GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
2884	I915_WRITE(GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
2885	I915_WRITE(GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
2886	I915_WRITE(GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
2887}
2888
2889static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
2890{
2891	I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
2892	I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
2893	I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
2894	I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
2895	I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
2896	I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
2897	I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
2898	I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2899}
2900
2901/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
2902 * bits. When using advanced contexts each context stores its own PAT, but
2903 * writing this data shouldn't be harmful even in those cases. */
2904static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
2905{
2906	u64 pat;
2907
2908	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
2909	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
2910	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |	/* for scanout with eLLC */
2911	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
2912	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
2913	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
2914	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
2915	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2916
2917	I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
2918	I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
2919}
2920
2921static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
2922{
2923	u64 pat;
2924
2925	/*
2926	 * Map WB on BDW to snooped on CHV.
2927	 *
2928	 * Only the snoop bit has meaning for CHV, the rest is
2929	 * ignored.
2930	 *
2931	 * The hardware will never snoop for certain types of accesses:
2932	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
2933	 * - PPGTT page tables
2934	 * - some other special cycles
2935	 *
2936	 * As with BDW, we also need to consider the following for GT accesses:
2937	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
2938	 * so RTL will always use the value corresponding to
2939	 * pat_sel = 000".
2940	 * Which means we must set the snoop bit in PAT entry 0
2941	 * in order to keep the global status page working.
2942	 */
2943
2944	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
2945	      GEN8_PPAT(1, 0) |
2946	      GEN8_PPAT(2, 0) |
2947	      GEN8_PPAT(3, 0) |
2948	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
2949	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
2950	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
2951	      GEN8_PPAT(7, CHV_PPAT_SNOOP);
2952
2953	I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
2954	I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
2955}
2956
2957static void gen6_gmch_remove(struct i915_address_space *vm)
2958{
2959	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2960
2961	iounmap(ggtt->gsm);
2962	cleanup_scratch_page(vm);
2963}
2964
2965static void setup_private_pat(struct drm_i915_private *dev_priv)
2966{
2967	GEM_BUG_ON(INTEL_GEN(dev_priv) < 8);
2968
2969	if (INTEL_GEN(dev_priv) >= 12)
2970		tgl_setup_private_ppat(dev_priv);
2971	else if (INTEL_GEN(dev_priv) >= 10)
2972		cnl_setup_private_ppat(dev_priv);
2973	else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
2974		chv_setup_private_ppat(dev_priv);
2975	else
2976		bdw_setup_private_ppat(dev_priv);
2977}
2978
2979static int gen8_gmch_probe(struct i915_ggtt *ggtt)
2980{
2981	struct drm_i915_private *dev_priv = ggtt->vm.i915;
2982	struct pci_dev *pdev = dev_priv->drm.pdev;
2983	unsigned int size;
2984	u16 snb_gmch_ctl;
2985	int err;
2986
2987	/* TODO: We're not aware of mappable constraints on gen8 yet */
2988	ggtt->gmadr =
2989		(struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
2990						 pci_resource_len(pdev, 2));
2991	ggtt->mappable_end = resource_size(&ggtt->gmadr);
2992
2993	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
2994	if (!err)
2995		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
2996	if (err)
2997		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
2998
2999	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3000	if (IS_CHERRYVIEW(dev_priv))
3001		size = chv_get_total_gtt_size(snb_gmch_ctl);
3002	else
3003		size = gen8_get_total_gtt_size(snb_gmch_ctl);
3004
3005	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
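	/*
	 * Worked example of the arithmetic above (illustrative only): an
	 * 8 MiB GSM holds 8M / 8 bytes = 1M gen8 PTEs, each mapping a
	 * 4 KiB page, giving a 4 GiB GGTT address space.
	 */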
3006	ggtt->vm.cleanup = gen6_gmch_remove;
3007	ggtt->vm.insert_page = gen8_ggtt_insert_page;
3008	ggtt->vm.clear_range = nop_clear_range;
3009	if (intel_scanout_needs_vtd_wa(dev_priv))
3010		ggtt->vm.clear_range = gen8_ggtt_clear_range;
3011
3012	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
3013
3014	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
3015	if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
3016	    IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
3017		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
3018		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
3019		if (ggtt->vm.clear_range != nop_clear_range)
3020			ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3021	}
3022
3023	ggtt->invalidate = gen6_ggtt_invalidate;
3024
3025	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3026	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3027	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3028	ggtt->vm.vma_ops.clear_pages = clear_pages;
3029
3030	ggtt->vm.pte_encode = gen8_pte_encode;
3031
3032	setup_private_pat(dev_priv);
3033
3034	return ggtt_probe_common(ggtt, size);
3035}
3036
3037static int gen6_gmch_probe(struct i915_ggtt *ggtt)
3038{
3039	struct drm_i915_private *dev_priv = ggtt->vm.i915;
3040	struct pci_dev *pdev = dev_priv->drm.pdev;
3041	unsigned int size;
3042	u16 snb_gmch_ctl;
3043	int err;
3044
3045	ggtt->gmadr =
3046		(struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3047						 pci_resource_len(pdev, 2));
3048	ggtt->mappable_end = resource_size(&ggtt->gmadr);
3049
3050	/* 64/512MB is the current min/max we actually know of, but this is just
3051	 * a coarse sanity check.
3052	 */
3053	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
3054		DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
3055		return -ENXIO;
3056	}
3057
3058	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
3059	if (!err)
3060		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
3061	if (err)
3062		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3063	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3064
3065	size = gen6_get_total_gtt_size(snb_gmch_ctl);
3066	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
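	/*
	 * Worked example (illustrative only): a 2 MiB GSM holds
	 * 2M / 4 bytes = 512K gen6 PTEs, each mapping a 4 KiB page,
	 * giving a 2 GiB GGTT address space.
	 */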
3067
3068	ggtt->vm.clear_range = nop_clear_range;
3069	if (!HAS_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
3070		ggtt->vm.clear_range = gen6_ggtt_clear_range;
3071	ggtt->vm.insert_page = gen6_ggtt_insert_page;
3072	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
3073	ggtt->vm.cleanup = gen6_gmch_remove;
3074
3075	ggtt->invalidate = gen6_ggtt_invalidate;
3076
3077	if (HAS_EDRAM(dev_priv))
3078		ggtt->vm.pte_encode = iris_pte_encode;
3079	else if (IS_HASWELL(dev_priv))
3080		ggtt->vm.pte_encode = hsw_pte_encode;
3081	else if (IS_VALLEYVIEW(dev_priv))
3082		ggtt->vm.pte_encode = byt_pte_encode;
3083	else if (INTEL_GEN(dev_priv) >= 7)
3084		ggtt->vm.pte_encode = ivb_pte_encode;
3085	else
3086		ggtt->vm.pte_encode = snb_pte_encode;
3087
3088	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3089	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3090	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3091	ggtt->vm.vma_ops.clear_pages = clear_pages;
3092
3093	return ggtt_probe_common(ggtt, size);
3094}
3095
3096static void i915_gmch_remove(struct i915_address_space *vm)
3097{
3098	intel_gmch_remove();
3099}
3100
3101static int i915_gmch_probe(struct i915_ggtt *ggtt)
3102{
3103	struct drm_i915_private *dev_priv = ggtt->vm.i915;
3104	phys_addr_t gmadr_base;
3105	int ret;
3106
3107	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
3108	if (!ret) {
3109		DRM_ERROR("failed to set up gmch\n");
3110		return -EIO;
3111	}
3112
3113	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
3114
3115	ggtt->gmadr =
3116		(struct resource) DEFINE_RES_MEM(gmadr_base,
3117						 ggtt->mappable_end);
3118
3119	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
3120	ggtt->vm.insert_page = i915_ggtt_insert_page;
3121	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
3122	ggtt->vm.clear_range = i915_ggtt_clear_range;
3123	ggtt->vm.cleanup = i915_gmch_remove;
3124
3125	ggtt->invalidate = gmch_ggtt_invalidate;
3126
3127	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3128	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3129	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3130	ggtt->vm.vma_ops.clear_pages = clear_pages;
3131
3132	if (unlikely(ggtt->do_idle_maps))
3133		dev_notice(dev_priv->drm.dev,
3134			   "Applying Ironlake quirks for intel_iommu\n");
3135
3136	return 0;
3137}
3138
3139static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
3140{
3141	struct drm_i915_private *i915 = gt->i915;
3142	int ret;
3143
3144	ggtt->vm.gt = gt;
3145	ggtt->vm.i915 = i915;
3146	ggtt->vm.dma = &i915->drm.pdev->dev;
3147
3148	if (INTEL_GEN(i915) <= 5)
3149		ret = i915_gmch_probe(ggtt);
3150	else if (INTEL_GEN(i915) < 8)
3151		ret = gen6_gmch_probe(ggtt);
3152	else
3153		ret = gen8_gmch_probe(ggtt);
3154	if (ret)
3155		return ret;
3156
3157	if ((ggtt->vm.total - 1) >> 32) {
3158		DRM_ERROR("We never expected a Global GTT with more than 32bits"
3159			  " of address space! Found %lldM!\n",
3160			  ggtt->vm.total >> 20);
3161		ggtt->vm.total = 1ULL << 32;
3162		ggtt->mappable_end =
3163			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
3164	}
3165
3166	if (ggtt->mappable_end > ggtt->vm.total) {
3167		DRM_ERROR("mappable aperture extends past end of GGTT,"
3168			  " aperture=%pa, total=%llx\n",
3169			  &ggtt->mappable_end, ggtt->vm.total);
3170		ggtt->mappable_end = ggtt->vm.total;
3171	}
3172
3173	/* GMADR is the PCI mmio aperture into the global GTT. */
3174	DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
3175	DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
3176	DRM_DEBUG_DRIVER("DSM size = %lluM\n",
3177			 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
3178
3179	return 0;
3180}
3181
3182/**
3183 * i915_ggtt_probe_hw - Probe GGTT hardware location
3184 * @i915: i915 device
3185 */
3186int i915_ggtt_probe_hw(struct drm_i915_private *i915)
3187{
3188	int ret;
3189
3190	ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
3191	if (ret)
3192		return ret;
3193
3194	if (intel_vtd_active())
3195		dev_info(i915->drm.dev, "VT-d active for gfx access\n");
3196
3197	return 0;
3198}
3199
3200static int ggtt_init_hw(struct i915_ggtt *ggtt)
3201{
3202	struct drm_i915_private *i915 = ggtt->vm.i915;
3203	int ret = 0;
3204
3205	mutex_lock(&i915->drm.struct_mutex);
3206
3207	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
3208
3209	ggtt->vm.is_ggtt = true;
3210
3211	/* Only VLV supports read-only GGTT mappings */
3212	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
3213
3214	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
3215		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
3216
3217	if (!io_mapping_init_wc(&ggtt->iomap,
3218				ggtt->gmadr.start,
3219				ggtt->mappable_end)) {
3220		ggtt->vm.cleanup(&ggtt->vm);
3221		ret = -EIO;
3222		goto out;
3223	}
3224
3225	ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
3226
3227	i915_ggtt_init_fences(ggtt);
3228
3229out:
3230	mutex_unlock(&i915->drm.struct_mutex);
3231
3232	return ret;
3233}
3234
3235/**
3236 * i915_ggtt_init_hw - Initialize GGTT hardware
3237 * @dev_priv: i915 device
3238 */
3239int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
3240{
3241	int ret;
3242
3243	stash_init(&dev_priv->mm.wc_stash);
3244
3245	/* Note that we use page colouring to enforce a guard page at the
3246	 * end of the address space. This is required as the CS may prefetch
3247	 * beyond the end of the batch buffer, across the page boundary,
3248	 * and beyond the end of the GTT if we do not provide a guard.
3249	 */
3250	ret = ggtt_init_hw(&dev_priv->ggtt);
3251	if (ret)
3252		return ret;
3253
3254	/*
3255	 * Initialise stolen early so that we may reserve preallocated
3256	 * objects for the BIOS to KMS transition.
3257	 */
3258	ret = i915_gem_init_stolen(dev_priv);
3259	if (ret)
3260		goto out_gtt_cleanup;
3261
3262	return 0;
3263
3264out_gtt_cleanup:
3265	dev_priv->ggtt.vm.cleanup(&dev_priv->ggtt.vm);
3266	return ret;
3267}
3268
3269int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
3270{
3271	if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
3272		return -EIO;
3273
3274	return 0;
3275}
3276
3277void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
3278{
3279	GEM_BUG_ON(ggtt->invalidate != gen6_ggtt_invalidate);
3280
3281	ggtt->invalidate = guc_ggtt_invalidate;
3282
3283	ggtt->invalidate(ggtt);
3284}
3285
3286void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
3287{
3288	/* XXX Temporary pardon for error unload */
3289	if (ggtt->invalidate == gen6_ggtt_invalidate)
3290		return;
3291
3292	/* We should only be called after i915_ggtt_enable_guc() */
3293	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
3294
3295	ggtt->invalidate = gen6_ggtt_invalidate;
3296
3297	ggtt->invalidate(ggtt);
3298}
3299
3300static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
3301{
3302	struct i915_vma *vma, *vn;
3303	bool flush = false;
3304
3305	intel_gt_check_and_clear_faults(ggtt->vm.gt);
3306
3307	mutex_lock(&ggtt->vm.mutex);
3308
3309	/* First fill our portion of the GTT with scratch pages */
3310	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
3311	ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
3312
3313	/* clflush objects bound into the GGTT and rebind them. */
3314	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
3315		struct drm_i915_gem_object *obj = vma->obj;
3316
3317		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
3318			continue;
3319
3320		mutex_unlock(&ggtt->vm.mutex);
3321
3322		if (!i915_vma_unbind(vma))
3323			goto lock;
3324
3325		WARN_ON(i915_vma_bind(vma,
3326				      obj ? obj->cache_level : 0,
3327				      PIN_UPDATE));
3328		if (obj) { /* only used during resume => exclusive access */
3329			flush |= fetch_and_zero(&obj->write_domain);
3330			obj->read_domains |= I915_GEM_DOMAIN_GTT;
3331		}
3332
3333lock:
3334		mutex_lock(&ggtt->vm.mutex);
3335	}
3336
3337	ggtt->vm.closed = false;
3338	ggtt->invalidate(ggtt);
3339
3340	mutex_unlock(&ggtt->vm.mutex);
3341
3342	if (flush)
3343		wbinvd_on_all_cpus();
3344}
3345
3346void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
3347{
3348	ggtt_restore_mappings(&i915->ggtt);
3349
3350	if (INTEL_GEN(i915) >= 8)
3351		setup_private_pat(i915);
3352}
3353
3354static struct scatterlist *
3355rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
3356	     unsigned int width, unsigned int height,
3357	     unsigned int stride,
3358	     struct sg_table *st, struct scatterlist *sg)
3359{
3360	unsigned int column, row;
3361	unsigned int src_idx;
3362
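	/*
	 * Walk the source pages column by column, starting from the
	 * bottom row of each column. Illustrative example: a 2x2 tile
	 * with stride 2 and offset 0 emits the source pages in the
	 * order 2, 0, 3, 1.
	 */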
3363	for (column = 0; column < width; column++) {
3364		src_idx = stride * (height - 1) + column + offset;
3365		for (row = 0; row < height; row++) {
3366			st->nents++;
3367			/* We don't need the pages, but need to initialize
3368			 * the entries so the sg list can be happily traversed.
3369			 * The only things we need are the DMA addresses.
3370			 */
3371			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
3372			sg_dma_address(sg) =
3373				i915_gem_object_get_dma_address(obj, src_idx);
3374			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
3375			sg = sg_next(sg);
3376			src_idx -= stride;
3377		}
3378	}
3379
3380	return sg;
3381}
3382
3383static noinline struct sg_table *
3384intel_rotate_pages(struct intel_rotation_info *rot_info,
3385		   struct drm_i915_gem_object *obj)
3386{
3387	unsigned int size = intel_rotation_info_size(rot_info);
3388	struct sg_table *st;
3389	struct scatterlist *sg;
3390	int ret = -ENOMEM;
3391	int i;
3392
3393	/* Allocate target SG list. */
3394	st = kmalloc(sizeof(*st), GFP_KERNEL);
3395	if (!st)
3396		goto err_st_alloc;
3397
3398	ret = sg_alloc_table(st, size, GFP_KERNEL);
3399	if (ret)
3400		goto err_sg_alloc;
3401
3402	st->nents = 0;
3403	sg = st->sgl;
3404
3405	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3406		sg = rotate_pages(obj, rot_info->plane[i].offset,
3407				  rot_info->plane[i].width, rot_info->plane[i].height,
3408				  rot_info->plane[i].stride, st, sg);
3409	}
3410
3411	return st;
3412
3413err_sg_alloc:
3414	kfree(st);
3415err_st_alloc:
3416
3417	DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3418			 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3419
3420	return ERR_PTR(ret);
3421}
3422
3423static struct scatterlist *
3424remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
3425	    unsigned int width, unsigned int height,
3426	    unsigned int stride,
3427	    struct sg_table *st, struct scatterlist *sg)
3428{
3429	unsigned int row;
3430
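	/*
	 * Walk the source pages row by row; within a row, physically
	 * contiguous DMA chunks are coalesced into a single sg entry,
	 * capped at the remaining width of the row.
	 */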
3431	for (row = 0; row < height; row++) {
3432		unsigned int left = width * I915_GTT_PAGE_SIZE;
3433
3434		while (left) {
3435			dma_addr_t addr;
3436			unsigned int length;
3437
3438			/* We don't need the pages, but need to initialize
3439			 * the entries so the sg list can be happily traversed.
3440			 * The only things we need are the DMA addresses.
3441			 */
3442
3443			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
3444
3445			length = min(left, length);
3446
3447			st->nents++;
3448
3449			sg_set_page(sg, NULL, length, 0);
3450			sg_dma_address(sg) = addr;
3451			sg_dma_len(sg) = length;
3452			sg = sg_next(sg);
3453
3454			offset += length / I915_GTT_PAGE_SIZE;
3455			left -= length;
3456		}
3457
3458		offset += stride - width;
3459	}
3460
3461	return sg;
3462}
3463
3464static noinline struct sg_table *
3465intel_remap_pages(struct intel_remapped_info *rem_info,
3466		  struct drm_i915_gem_object *obj)
3467{
3468	unsigned int size = intel_remapped_info_size(rem_info);
3469	struct sg_table *st;
3470	struct scatterlist *sg;
3471	int ret = -ENOMEM;
3472	int i;
3473
3474	/* Allocate target SG list. */
3475	st = kmalloc(sizeof(*st), GFP_KERNEL);
3476	if (!st)
3477		goto err_st_alloc;
3478
3479	ret = sg_alloc_table(st, size, GFP_KERNEL);
3480	if (ret)
3481		goto err_sg_alloc;
3482
3483	st->nents = 0;
3484	sg = st->sgl;
3485
3486	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
3487		sg = remap_pages(obj, rem_info->plane[i].offset,
3488				 rem_info->plane[i].width, rem_info->plane[i].height,
3489				 rem_info->plane[i].stride, st, sg);
3490	}
3491
3492	i915_sg_trim(st);
3493
3494	return st;
3495
3496err_sg_alloc:
3497	kfree(st);
3498err_st_alloc:
3499
3500	DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3501			 obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
3502
3503	return ERR_PTR(ret);
3504}
3505
3506static noinline struct sg_table *
3507intel_partial_pages(const struct i915_ggtt_view *view,
3508		    struct drm_i915_gem_object *obj)
3509{
3510	struct sg_table *st;
3511	struct scatterlist *sg, *iter;
3512	unsigned int count = view->partial.size;
3513	unsigned int offset;
3514	int ret = -ENOMEM;
3515
3516	st = kmalloc(sizeof(*st), GFP_KERNEL);
3517	if (!st)
3518		goto err_st_alloc;
3519
3520	ret = sg_alloc_table(st, count, GFP_KERNEL);
3521	if (ret)
3522		goto err_sg_alloc;
3523
3524	iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
3525	GEM_BUG_ON(!iter);
3526
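	/*
	 * @iter points at the sg chunk holding the first page of the
	 * partial view and @offset is that page's index within the chunk;
	 * copy DMA address ranges from there until @count pages have been
	 * described.
	 */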
3527	sg = st->sgl;
3528	st->nents = 0;
3529	do {
3530		unsigned int len;
3531
3532		len = min(iter->length - (offset << PAGE_SHIFT),
3533			  count << PAGE_SHIFT);
3534		sg_set_page(sg, NULL, len, 0);
3535		sg_dma_address(sg) =
3536			sg_dma_address(iter) + (offset << PAGE_SHIFT);
3537		sg_dma_len(sg) = len;
3538
3539		st->nents++;
3540		count -= len >> PAGE_SHIFT;
3541		if (count == 0) {
3542			sg_mark_end(sg);
3543			i915_sg_trim(st); /* Drop any unused tail entries. */
3544
3545			return st;
3546		}
3547
3548		sg = __sg_next(sg);
3549		iter = __sg_next(iter);
3550		offset = 0;
3551	} while (1);
3552
3553err_sg_alloc:
3554	kfree(st);
3555err_st_alloc:
3556	return ERR_PTR(ret);
3557}
3558
3559static int
3560i915_get_ggtt_vma_pages(struct i915_vma *vma)
3561{
3562	int ret;
3563
3564	/* The vma->pages are only valid within the lifespan of the borrowed
3565	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3566	 * must be the vma->pages. A simple rule is that vma->pages must only
3567	 * be accessed when the obj->mm.pages are pinned.
3568	 */
3569	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3570
3571	switch (vma->ggtt_view.type) {
3572	default:
3573		GEM_BUG_ON(vma->ggtt_view.type);
3574		/* fall through */
3575	case I915_GGTT_VIEW_NORMAL:
3576		vma->pages = vma->obj->mm.pages;
3577		return 0;
3578
3579	case I915_GGTT_VIEW_ROTATED:
3580		vma->pages =
3581			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3582		break;
3583
3584	case I915_GGTT_VIEW_REMAPPED:
3585		vma->pages =
3586			intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
3587		break;
3588
3589	case I915_GGTT_VIEW_PARTIAL:
3590		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
3591		break;
3592	}
3593
3594	ret = 0;
3595	if (IS_ERR(vma->pages)) {
3596		ret = PTR_ERR(vma->pages);
3597		vma->pages = NULL;
3598		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3599			  vma->ggtt_view.type, ret);
3600	}
3601	return ret;
3602}
3603
3604/**
3605 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
3606 * @vm: the &struct i915_address_space
3607 * @node: the &struct drm_mm_node (typically i915_vma.node)
3608 * @size: how much space to allocate inside the GTT,
3609 *        must be #I915_GTT_PAGE_SIZE aligned
3610 * @offset: where to insert inside the GTT,
3611 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3612 *          (@offset + @size) must fit within the address space
3613 * @color: color to apply to node, if this node is not from a VMA,
3614 *         color must be #I915_COLOR_UNEVICTABLE
3615 * @flags: control search and eviction behaviour
3616 *
3617 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3618 * the address space (using @size and @color). If the @node does not fit, it
3619 * tries to evict any overlapping nodes from the GTT, including any
3620 * neighbouring nodes if the colors do not match (to ensure guard pages between
3621 * differing domains). See i915_gem_evict_for_node() for the gory details
3622 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3623 * evicting active overlapping objects, and any overlapping node that is pinned
3624 * or marked as unevictable will also result in failure.
3625 *
3626 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3627 * asked to wait for eviction and interrupted.
3628 */
3629int i915_gem_gtt_reserve(struct i915_address_space *vm,
3630			 struct drm_mm_node *node,
3631			 u64 size, u64 offset, unsigned long color,
3632			 unsigned int flags)
3633{
3634	int err;
3635
3636	GEM_BUG_ON(!size);
3637	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3638	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3639	GEM_BUG_ON(range_overflows(offset, size, vm->total));
3640	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
3641	GEM_BUG_ON(drm_mm_node_allocated(node));
3642
3643	node->size = size;
3644	node->start = offset;
3645	node->color = color;
3646
3647	err = drm_mm_reserve_node(&vm->mm, node);
3648	if (err != -ENOSPC)
3649		return err;
3650
3651	if (flags & PIN_NOEVICT)
3652		return -ENOSPC;
3653
3654	err = i915_gem_evict_for_node(vm, node, flags);
3655	if (err == 0)
3656		err = drm_mm_reserve_node(&vm->mm, node);
3657
3658	return err;
3659}
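
/*
 * Minimal usage sketch (illustrative only; the node, size and flags below
 * are hypothetical, see ggtt_reserve_guc_top() above for a real caller):
 *
 *	err = i915_gem_gtt_reserve(&ggtt->vm, &node, SZ_64K, 0,
 *				   I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
 *	if (err)
 *		return err;
 */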
3660
3661static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3662{
3663	u64 range, addr;
3664
3665	GEM_BUG_ON(range_overflows(start, len, end));
3666	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3667
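	/*
	 * Example (illustrative numbers): start=0, end=1 MiB, len=64 KiB,
	 * align=4 KiB gives range = 960 KiB; a random value in [0, range)
	 * is added to start and the result rounded up to the next 4 KiB
	 * boundary.
	 */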
3668	range = round_down(end - len, align) - round_up(start, align);
3669	if (range) {
3670		if (sizeof(unsigned long) == sizeof(u64)) {
3671			addr = get_random_long();
3672		} else {
3673			addr = get_random_int();
3674			if (range > U32_MAX) {
3675				addr <<= 32;
3676				addr |= get_random_int();
3677			}
3678		}
3679		div64_u64_rem(addr, range, &addr);
3680		start += addr;
3681	}
3682
3683	return round_up(start, align);
3684}
3685
3686/**
3687 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
3688 * @vm: the &struct i915_address_space
3689 * @node: the &struct drm_mm_node (typically i915_vma.node)
3690 * @size: how much space to allocate inside the GTT,
3691 *        must be #I915_GTT_PAGE_SIZE aligned
3692 * @alignment: required alignment of starting offset, may be 0 but
3693 *             if specified, this must be a power-of-two and at least
3694 *             #I915_GTT_MIN_ALIGNMENT
3695 * @color: color to apply to node
3696 * @start: start of any range restriction inside GTT (0 for all),
3697 *         must be #I915_GTT_PAGE_SIZE aligned
3698 * @end: end of any range restriction inside GTT (U64_MAX for all),
3699 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3700 * @flags: control search and eviction behaviour
3701 *
3702 * i915_gem_gtt_insert() first searches for an available hole into which
3703 * is can insert the node. The hole address is aligned to @alignment and
3704 * it can insert the node. The hole address is aligned to @alignment and
3705 * nodes on either side of the hole must match @color, or else a guard page
3706 * will be inserted between the two nodes (or the node evicted). If no
3707 * suitable hole is found, first a victim is randomly selected and tested
3708 * for eviction; otherwise the LRU list of objects within the GTT
3709 * is scanned to find the first set of replacement nodes to create the hole.
3710 * Those old overlapping nodes are evicted from the GTT (and so must be
3711 * rebound before any future use). Any node that is currently pinned cannot
3712 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
3713 * active and #PIN_NONBLOCK is specified, that node is also skipped when
3714 * searching for an eviction candidate. See i915_gem_evict_something() for
3715 * the gory details on the eviction algorithm.
3716 *
3717 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3718 * asked to wait for eviction and interrupted.
3719 */
3720int i915_gem_gtt_insert(struct i915_address_space *vm,
3721			struct drm_mm_node *node,
3722			u64 size, u64 alignment, unsigned long color,
3723			u64 start, u64 end, unsigned int flags)
3724{
3725	enum drm_mm_insert_mode mode;
3726	u64 offset;
3727	int err;
3728
3729	lockdep_assert_held(&vm->i915->drm.struct_mutex);
3730	GEM_BUG_ON(!size);
3731	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3732	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
3733	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
3734	GEM_BUG_ON(start >= end);
3735	GEM_BUG_ON(start > 0  && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
3736	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
3737	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
3738	GEM_BUG_ON(drm_mm_node_allocated(node));
3739
3740	if (unlikely(range_overflows(start, size, end)))
3741		return -ENOSPC;
3742
3743	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
3744		return -ENOSPC;
3745
3746	mode = DRM_MM_INSERT_BEST;
3747	if (flags & PIN_HIGH)
3748		mode = DRM_MM_INSERT_HIGHEST;
3749	if (flags & PIN_MAPPABLE)
3750		mode = DRM_MM_INSERT_LOW;
3751
3752	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3753	 * so we know that we always have a minimum alignment of 4096.
3754	 * The drm_mm range manager is optimised to return results
3755	 * with zero alignment, so where possible use the optimal
3756	 * path.
3757	 */
3758	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
3759	if (alignment <= I915_GTT_MIN_ALIGNMENT)
3760		alignment = 0;
3761
3762	err = drm_mm_insert_node_in_range(&vm->mm, node,
3763					  size, alignment, color,
3764					  start, end, mode);
3765	if (err != -ENOSPC)
3766		return err;
3767
3768	if (mode & DRM_MM_INSERT_ONCE) {
3769		err = drm_mm_insert_node_in_range(&vm->mm, node,
3770						  size, alignment, color,
3771						  start, end,
3772						  DRM_MM_INSERT_BEST);
3773		if (err != -ENOSPC)
3774			return err;
3775	}
3776
3777	if (flags & PIN_NOEVICT)
3778		return -ENOSPC;
3779
3780	/*
3781	 * No free space, pick a slot at random.
3782	 *
3783	 * There is a pathological case here using a GTT shared between
3784	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
3785	 *
3786	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
3787	 *         (64k objects)             (448k objects)
3788	 *
3789	 * Now imagine that the eviction LRU is ordered top-down (just because
3790	 * pathology meets real life), and that we need to evict an object to
3791	 * make room inside the aperture. The eviction scan then has to walk
3792	 * the 448k list before it finds one within range. And now imagine that
3793	 * it has to search for a new hole between every byte inside the memcpy,
3794	 * for several simultaneous clients.
3795	 *
3796	 * On a full-ppgtt system, if we have run out of available space, there
3797	 * will be lots and lots of objects in the eviction list! Again,
3798	 * searching that LRU list may be slow if we are also applying any
3799	 * range restrictions (e.g. restriction to low 4GiB) and so, for
3800 * simplicity and similarity between the different GTTs, try the single
3801	 * random replacement first.
3802	 */
3803	offset = random_offset(start, end,
3804			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
3805	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
3806	if (err != -ENOSPC)
3807		return err;
3808
3809	if (flags & PIN_NOSEARCH)
3810		return -ENOSPC;
3811
3812	/* Randomly selected placement is pinned, do a search */
3813	err = i915_gem_evict_something(vm, size, alignment, color,
3814				       start, end, flags);
3815	if (err)
3816		return err;
3817
3818	return drm_mm_insert_node_in_range(&vm->mm, node,
3819					   size, alignment, color,
3820					   start, end, DRM_MM_INSERT_EVICT);
3821}
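/*
 * Illustrative sketch only: a hypothetical caller of i915_gem_gtt_insert()
 * restricting the placement to the low 4GiB of the GTT. The helper name and
 * the choice of colour (0, no special cache colouring) are assumptions for
 * the example; the caller is expected to hold vm->i915->drm.struct_mutex,
 * as the function itself asserts.
 */
static int __maybe_unused example_insert_low_4g(struct i915_address_space *vm,
						struct drm_mm_node *node,
						u64 size, unsigned int flags)
{
	/* The GTT only deals in whole pages. */
	size = round_up(size, I915_GTT_PAGE_SIZE);

	/* No alignment beyond the page size; search [0, 4GiB) only. */
	return i915_gem_gtt_insert(vm, node, size, 0, 0,
				   0, BIT_ULL(32), flags);
}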
3822
3823#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3824#include "selftests/mock_gtt.c"
3825#include "selftests/i915_gem_gtt.c"
3826#endif
v3.1
  1/*
  2 * Copyright © 2010 Daniel Vetter
  3 *
 23 */
 24
 25#include "drmP.h"
 26#include "drm.h"
 27#include "i915_drm.h"
 28#include "i915_drv.h"
 29#include "i915_trace.h"
 30#include "intel_drv.h"
 31
 32/* XXX kill agp_type! */
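/*
 * The switch below picks the AGP memory type used for an object's GTT PTEs:
 * LLC+MLC caching only exists from gen6 onwards, older parts fall through to
 * plain cached PTEs, and uncached objects always use AGP_USER_MEMORY.
 */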
 33static unsigned int cache_level_to_agp_type(struct drm_device *dev,
 34					    enum i915_cache_level cache_level)
 35{
 36	switch (cache_level) {
 37	case I915_CACHE_LLC_MLC:
 38		if (INTEL_INFO(dev)->gen >= 6)
 39			return AGP_USER_CACHED_MEMORY_LLC_MLC;
 40		/* Older chipsets do not have this extra level of CPU
 41		 * cacheing, so fallthrough and request the PTE simply
 42		 * as cached.
 43		 */
 44	case I915_CACHE_LLC:
 45		return AGP_USER_CACHED_MEMORY;
 46	default:
 47	case I915_CACHE_NONE:
 48		return AGP_USER_MEMORY;
 49	}
 50}
 51
 52void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 53{
 54	struct drm_i915_private *dev_priv = dev->dev_private;
 55	struct drm_i915_gem_object *obj;
 56
 57	/* First fill our portion of the GTT with scratch pages */
 58	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
 59			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 60
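	/*
	 * Rebind every object still resident in the GTT, flushing the CPU
	 * caches first so the rewritten PTEs observe coherent data.
	 */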
 61	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
 62		i915_gem_clflush_object(obj);
 63		i915_gem_gtt_rebind_object(obj, obj->cache_level);
 64	}
 65
 66	intel_gtt_chipset_flush();
 67}
 68
 69int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
 70{
 71	struct drm_device *dev = obj->base.dev;
 72	struct drm_i915_private *dev_priv = dev->dev_private;
 73	unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
 74	int ret;
 75
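	/*
	 * When the GART requires DMA remapping (needs_dmar), the backing
	 * pages are first mapped into a scatter/gather list so that bus
	 * addresses can be programmed into the GTT; otherwise the struct
	 * pages are inserted directly.
	 */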
 76	if (dev_priv->mm.gtt->needs_dmar) {
 77		ret = intel_gtt_map_memory(obj->pages,
 78					   obj->base.size >> PAGE_SHIFT,
 79					   &obj->sg_list,
 80					   &obj->num_sg);
 81		if (ret != 0)
 82			return ret;
 83
 84		intel_gtt_insert_sg_entries(obj->sg_list,
 85					    obj->num_sg,
 86					    obj->gtt_space->start >> PAGE_SHIFT,
 87					    agp_type);
 88	} else
 89		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
 90				       obj->base.size >> PAGE_SHIFT,
 91				       obj->pages,
 92				       agp_type);
 93
 94	return 0;
 95}
 96
 97void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
 98				enum i915_cache_level cache_level)
 99{
100	struct drm_device *dev = obj->base.dev;
101	struct drm_i915_private *dev_priv = dev->dev_private;
102	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
103
104	if (dev_priv->mm.gtt->needs_dmar) {
105		BUG_ON(!obj->sg_list);
106
107		intel_gtt_insert_sg_entries(obj->sg_list,
108					    obj->num_sg,
109					    obj->gtt_space->start >> PAGE_SHIFT,
110					    agp_type);
111	} else
112		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
113				       obj->base.size >> PAGE_SHIFT,
114				       obj->pages,
115				       agp_type);
116}
117
118void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
119{
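	/* Point the object's range back at scratch pages and drop any DMA mapping. */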
120	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
121			      obj->base.size >> PAGE_SHIFT);
122
123	if (obj->sg_list) {
124		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
125		obj->sg_list = NULL;
126	}
127}