   1/*
   2 * Copyright © 2010 Daniel Vetter
   3 * Copyright © 2011-2014 Intel Corporation
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice (including the next
  13 * paragraph) shall be included in all copies or substantial portions of the
  14 * Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  22 * IN THE SOFTWARE.
  23 *
  24 */
  25
  26#include <linux/slab.h> /* fault-inject.h is not standalone! */
  27
  28#include <linux/fault-inject.h>
  29#include <linux/log2.h>
  30#include <linux/random.h>
  31#include <linux/seq_file.h>
  32#include <linux/stop_machine.h>
  33
  34#include <asm/set_memory.h>
  35#include <asm/smp.h>
  36
  37#include <drm/i915_drm.h>
  38
  39#include "display/intel_frontbuffer.h"
  40#include "gt/intel_gt.h"
  41
  42#include "i915_drv.h"
  43#include "i915_scatterlist.h"
  44#include "i915_trace.h"
  45#include "i915_vgpu.h"
  46
  47#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
  48
  49#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
  50#define DBG(...) trace_printk(__VA_ARGS__)
  51#else
  52#define DBG(...)
  53#endif
  54
  55/**
  56 * DOC: Global GTT views
  57 *
  58 * Background and previous state
  59 *
   60 * Historically, objects could exist (be bound) in global GTT space only as
   61 * singular instances with a view representing all of the object's backing pages
   62 * in a linear fashion. This view is called the normal view.
  63 *
   64 * To support multiple views of the same object, where the number of mapped
   65 * pages is not equal to the backing store, or where the layout of the pages
   66 * is not linear, the concept of a GGTT view was added.
  67 *
  68 * One example of an alternative view is a stereo display driven by a single
  69 * image. In this case we would have a framebuffer looking like this
  70 * (2x2 pages):
  71 *
  72 *    12
  73 *    34
  74 *
   75 * The above represents a normal GGTT view as normally mapped for GPU or CPU
   76 * rendering. In contrast, the view fed to the display engine could look
   77 * something like this:
  78 *
  79 *   1212
  80 *   3434
  81 *
   82 * In this example both the size and layout of pages in the alternative view
   83 * differ from the normal view.
  84 *
  85 * Implementation and usage
  86 *
  87 * GGTT views are implemented using VMAs and are distinguished via enum
  88 * i915_ggtt_view_type and struct i915_ggtt_view.
  89 *
   90 * A new flavour of core GEM functions which work with GGTT bound objects was
   91 * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
   92 * renaming in large amounts of code. They take the struct i915_ggtt_view
  93 * parameter encapsulating all metadata required to implement a view.
  94 *
   95 * As a helper for callers which are only interested in the normal view, a
   96 * globally const i915_ggtt_view_normal singleton instance exists. All old core
   97 * GEM API functions, the ones not taking the view parameter, operate on or
   98 * with the normal GGTT view.
  99 *
 100 * Code wanting to add or use a new GGTT view needs to:
 101 *
 102 * 1. Add a new enum with a suitable name.
 103 * 2. Extend the metadata in the i915_ggtt_view structure if required.
  104 * 3. Add support to i915_get_ggtt_vma_pages().
 105 *
  106 * New views are required to build a scatter-gather table from within the
  107 * i915_get_ggtt_vma_pages function. This table is stored in the vma.ggtt_view
  108 * and exists for the lifetime of a VMA.
 109 *
  110 * The core API is designed to have copy semantics, which means that the passed
  111 * in struct i915_ggtt_view does not need to be persistent (left around after
  112 * calling the core API functions).
 113 *
 114 */
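/*
 * For illustration, a caller interested in an alternative view could fill in
 * a struct i915_ggtt_view before pinning, e.g.
 *
 *	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_ROTATED };
 *	... describe the plane layout in view.rotated ...
 *	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
 *
 * whereas passing NULL (or a view of type I915_GGTT_VIEW_NORMAL) selects the
 * normal view described above.
 */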
 115
 116#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
 117
 118static int
 119i915_get_ggtt_vma_pages(struct i915_vma *vma);
 120
 121static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
 122{
 123	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
 124
 125	/*
 126	 * Note that as an uncached mmio write, this will flush the
 127	 * WCB of the writes into the GGTT before it triggers the invalidate.
 128	 */
 129	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 130}
 131
 132static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
 133{
 134	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
 135
 136	gen6_ggtt_invalidate(ggtt);
 137	intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 138}
 139
 140static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
 141{
 142	intel_gtt_chipset_flush();
 143}
 144
 145static int ppgtt_bind_vma(struct i915_vma *vma,
 146			  enum i915_cache_level cache_level,
 147			  u32 unused)
 148{
 149	u32 pte_flags;
 150	int err;
 151
 152	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
 153		err = vma->vm->allocate_va_range(vma->vm,
 154						 vma->node.start, vma->size);
 155		if (err)
 156			return err;
 157	}
 158
 159	/* Applicable to VLV, and gen8+ */
 160	pte_flags = 0;
 161	if (i915_gem_object_is_readonly(vma->obj))
 162		pte_flags |= PTE_READ_ONLY;
 163
 164	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
 165
 166	return 0;
 167}
 168
 169static void ppgtt_unbind_vma(struct i915_vma *vma)
 170{
 171	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
 172}
 173
 174static int ppgtt_set_pages(struct i915_vma *vma)
 175{
 176	GEM_BUG_ON(vma->pages);
 177
 178	vma->pages = vma->obj->mm.pages;
 179
 180	vma->page_sizes = vma->obj->mm.page_sizes;
 181
 182	return 0;
 183}
 184
 185static void clear_pages(struct i915_vma *vma)
 186{
 187	GEM_BUG_ON(!vma->pages);
 188
 189	if (vma->pages != vma->obj->mm.pages) {
 190		sg_free_table(vma->pages);
 191		kfree(vma->pages);
 192	}
 193	vma->pages = NULL;
 194
 195	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
 196}
 197
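/*
 * A gen8 PTE is the dma address of the backing page OR'ed with the
 * present/RW bits and a PPAT cacheability selector; PTE_READ_ONLY simply
 * clears _PAGE_RW again.
 */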
 198static u64 gen8_pte_encode(dma_addr_t addr,
 199			   enum i915_cache_level level,
 200			   u32 flags)
 201{
 202	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
 203
 204	if (unlikely(flags & PTE_READ_ONLY))
 205		pte &= ~_PAGE_RW;
 206
 207	switch (level) {
 208	case I915_CACHE_NONE:
 209		pte |= PPAT_UNCACHED;
 210		break;
 211	case I915_CACHE_WT:
 212		pte |= PPAT_DISPLAY_ELLC;
 213		break;
 214	default:
 215		pte |= PPAT_CACHED;
 216		break;
 217	}
 218
 219	return pte;
 220}
 221
 222static u64 gen8_pde_encode(const dma_addr_t addr,
 223			   const enum i915_cache_level level)
 224{
 225	u64 pde = _PAGE_PRESENT | _PAGE_RW;
 226	pde |= addr;
 227	if (level != I915_CACHE_NONE)
 228		pde |= PPAT_CACHED_PDE;
 229	else
 230		pde |= PPAT_UNCACHED;
 231	return pde;
 232}
 233
 234static u64 snb_pte_encode(dma_addr_t addr,
 235			  enum i915_cache_level level,
 236			  u32 flags)
 237{
 238	gen6_pte_t pte = GEN6_PTE_VALID;
 239	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 240
 241	switch (level) {
 242	case I915_CACHE_L3_LLC:
 243	case I915_CACHE_LLC:
 244		pte |= GEN6_PTE_CACHE_LLC;
 245		break;
 246	case I915_CACHE_NONE:
 247		pte |= GEN6_PTE_UNCACHED;
 248		break;
 249	default:
 250		MISSING_CASE(level);
 251	}
 252
 253	return pte;
 254}
 255
 256static u64 ivb_pte_encode(dma_addr_t addr,
 257			  enum i915_cache_level level,
 258			  u32 flags)
 259{
 260	gen6_pte_t pte = GEN6_PTE_VALID;
 261	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 262
 263	switch (level) {
 264	case I915_CACHE_L3_LLC:
 265		pte |= GEN7_PTE_CACHE_L3_LLC;
 266		break;
 267	case I915_CACHE_LLC:
 268		pte |= GEN6_PTE_CACHE_LLC;
 269		break;
 270	case I915_CACHE_NONE:
 271		pte |= GEN6_PTE_UNCACHED;
 272		break;
 273	default:
 274		MISSING_CASE(level);
 275	}
 276
 277	return pte;
 278}
 279
 280static u64 byt_pte_encode(dma_addr_t addr,
 281			  enum i915_cache_level level,
 282			  u32 flags)
 283{
 284	gen6_pte_t pte = GEN6_PTE_VALID;
 285	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 286
 287	if (!(flags & PTE_READ_ONLY))
 288		pte |= BYT_PTE_WRITEABLE;
 289
 290	if (level != I915_CACHE_NONE)
 291		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
 292
 293	return pte;
 294}
 295
 296static u64 hsw_pte_encode(dma_addr_t addr,
 297			  enum i915_cache_level level,
 298			  u32 flags)
 299{
 300	gen6_pte_t pte = GEN6_PTE_VALID;
 301	pte |= HSW_PTE_ADDR_ENCODE(addr);
 302
 303	if (level != I915_CACHE_NONE)
 304		pte |= HSW_WB_LLC_AGE3;
 305
 306	return pte;
 307}
 308
 309static u64 iris_pte_encode(dma_addr_t addr,
 310			   enum i915_cache_level level,
 311			   u32 flags)
 312{
 313	gen6_pte_t pte = GEN6_PTE_VALID;
 314	pte |= HSW_PTE_ADDR_ENCODE(addr);
 315
 316	switch (level) {
 317	case I915_CACHE_NONE:
 318		break;
 319	case I915_CACHE_WT:
 320		pte |= HSW_WT_ELLC_LLC_AGE3;
 321		break;
 322	default:
 323		pte |= HSW_WB_ELLC_LLC_AGE3;
 324		break;
 325	}
 326
 327	return pte;
 328}
 329
 330static void stash_init(struct pagestash *stash)
 331{
 332	pagevec_init(&stash->pvec);
 333	spin_lock_init(&stash->lock);
 334}
 335
 336static struct page *stash_pop_page(struct pagestash *stash)
 337{
 338	struct page *page = NULL;
 339
 340	spin_lock(&stash->lock);
 341	if (likely(stash->pvec.nr))
 342		page = stash->pvec.pages[--stash->pvec.nr];
 343	spin_unlock(&stash->lock);
 344
 345	return page;
 346}
 347
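/*
 * Move as many pages as fit from the tail of @pvec into @stash; any
 * remainder is left in @pvec for the caller to release.
 */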
 348static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
 349{
 350	unsigned int nr;
 351
 352	spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
 353
 354	nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
 355	memcpy(stash->pvec.pages + stash->pvec.nr,
 356	       pvec->pages + pvec->nr - nr,
 357	       sizeof(pvec->pages[0]) * nr);
 358	stash->pvec.nr += nr;
 359
 360	spin_unlock(&stash->lock);
 361
 362	pvec->nr -= nr;
 363}
 364
 365static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 366{
 367	struct pagevec stack;
 368	struct page *page;
 369
 370	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
 371		i915_gem_shrink_all(vm->i915);
 372
 373	page = stash_pop_page(&vm->free_pages);
 374	if (page)
 375		return page;
 376
 377	if (!vm->pt_kmap_wc)
 378		return alloc_page(gfp);
 379
 380	/* Look in our global stash of WC pages... */
 381	page = stash_pop_page(&vm->i915->mm.wc_stash);
 382	if (page)
 383		return page;
 384
 385	/*
 386	 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
 387	 *
 388	 * We have to be careful as page allocation may trigger the shrinker
 389	 * (via direct reclaim) which will fill up the WC stash underneath us.
 390	 * So we add our WB pages into a temporary pvec on the stack and merge
 391	 * them into the WC stash after all the allocations are complete.
 392	 */
 393	pagevec_init(&stack);
 394	do {
 395		struct page *page;
 396
 397		page = alloc_page(gfp);
 398		if (unlikely(!page))
 399			break;
 400
 401		stack.pages[stack.nr++] = page;
 402	} while (pagevec_space(&stack));
 403
 404	if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
 405		page = stack.pages[--stack.nr];
 406
 407		/* Merge spare WC pages to the global stash */
 408		if (stack.nr)
 409			stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
 410
 411		/* Push any surplus WC pages onto the local VM stash */
 412		if (stack.nr)
 413			stash_push_pagevec(&vm->free_pages, &stack);
 414	}
 415
 416	/* Return unwanted leftovers */
 417	if (unlikely(stack.nr)) {
 418		WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
 419		__pagevec_release(&stack);
 420	}
 421
 422	return page;
 423}
 424
 425static void vm_free_pages_release(struct i915_address_space *vm,
 426				  bool immediate)
 427{
 428	struct pagevec *pvec = &vm->free_pages.pvec;
 429	struct pagevec stack;
 430
 431	lockdep_assert_held(&vm->free_pages.lock);
 432	GEM_BUG_ON(!pagevec_count(pvec));
 433
 434	if (vm->pt_kmap_wc) {
 435		/*
 436		 * When we use WC, first fill up the global stash and then
 437		 * only if full immediately free the overflow.
 438		 */
 439		stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
 440
 441		/*
 442		 * As we have made some room in the VM's free_pages,
 443		 * we can wait for it to fill again. Unless we are
 444		 * inside i915_address_space_fini() and must
 445		 * immediately release the pages!
 446		 */
 447		if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
 448			return;
 449
 450		/*
 451		 * We have to drop the lock to allow ourselves to sleep,
 452		 * so take a copy of the pvec and clear the stash for
 453		 * others to use it as we sleep.
 454		 */
 455		stack = *pvec;
 456		pagevec_reinit(pvec);
 457		spin_unlock(&vm->free_pages.lock);
 458
 459		pvec = &stack;
 460		set_pages_array_wb(pvec->pages, pvec->nr);
 461
 462		spin_lock(&vm->free_pages.lock);
 463	}
 464
 465	__pagevec_release(pvec);
 466}
 467
 468static void vm_free_page(struct i915_address_space *vm, struct page *page)
 469{
 470	/*
 471	 * On !llc, we need to change the pages back to WB. We only do so
 472	 * in bulk, so we rarely need to change the page attributes here,
 473	 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
 474	 * To make detection of the possible sleep more likely, use an
 475	 * unconditional might_sleep() for everybody.
 476	 */
 477	might_sleep();
 478	spin_lock(&vm->free_pages.lock);
 479	while (!pagevec_space(&vm->free_pages.pvec))
 480		vm_free_pages_release(vm, false);
 481	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
 482	pagevec_add(&vm->free_pages.pvec, page);
 483	spin_unlock(&vm->free_pages.lock);
 484}
 485
 486static void i915_address_space_fini(struct i915_address_space *vm)
 487{
 488	spin_lock(&vm->free_pages.lock);
 489	if (pagevec_count(&vm->free_pages.pvec))
 490		vm_free_pages_release(vm, true);
 491	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
 492	spin_unlock(&vm->free_pages.lock);
 493
 494	drm_mm_takedown(&vm->mm);
 495
 496	mutex_destroy(&vm->mutex);
 497}
 498
 499static void ppgtt_destroy_vma(struct i915_address_space *vm)
 500{
 501	struct list_head *phases[] = {
 502		&vm->bound_list,
 503		&vm->unbound_list,
 504		NULL,
 505	}, **phase;
 506
 507	mutex_lock(&vm->i915->drm.struct_mutex);
 508	for (phase = phases; *phase; phase++) {
 509		struct i915_vma *vma, *vn;
 510
 511		list_for_each_entry_safe(vma, vn, *phase, vm_link)
 512			i915_vma_destroy(vma);
 513	}
 514	mutex_unlock(&vm->i915->drm.struct_mutex);
 515}
 516
 517static void __i915_vm_release(struct work_struct *work)
 518{
 519	struct i915_address_space *vm =
 520		container_of(work, struct i915_address_space, rcu.work);
 521
 522	ppgtt_destroy_vma(vm);
 523
 524	GEM_BUG_ON(!list_empty(&vm->bound_list));
 525	GEM_BUG_ON(!list_empty(&vm->unbound_list));
 526
 527	vm->cleanup(vm);
 528	i915_address_space_fini(vm);
 529
 530	kfree(vm);
 531}
 532
 533void i915_vm_release(struct kref *kref)
 534{
 535	struct i915_address_space *vm =
 536		container_of(kref, struct i915_address_space, ref);
 537
 538	GEM_BUG_ON(i915_is_ggtt(vm));
 539	trace_i915_ppgtt_release(vm);
 540
 541	vm->closed = true;
 542	queue_rcu_work(vm->i915->wq, &vm->rcu);
 543}
 544
 545static void i915_address_space_init(struct i915_address_space *vm, int subclass)
 546{
 547	kref_init(&vm->ref);
 548	INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
 549
 550	/*
 551	 * The vm->mutex must be reclaim safe (for use in the shrinker).
 552	 * Do a dummy acquire now under fs_reclaim so that any allocation
 553	 * attempt holding the lock is immediately reported by lockdep.
 554	 */
 555	mutex_init(&vm->mutex);
 556	lockdep_set_subclass(&vm->mutex, subclass);
 557	i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
 558
 559	GEM_BUG_ON(!vm->total);
 560	drm_mm_init(&vm->mm, 0, vm->total);
 561	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
 562
 563	stash_init(&vm->free_pages);
 564
 565	INIT_LIST_HEAD(&vm->unbound_list);
 566	INIT_LIST_HEAD(&vm->bound_list);
 567}
 568
 569static int __setup_page_dma(struct i915_address_space *vm,
 570			    struct i915_page_dma *p,
 571			    gfp_t gfp)
 572{
 573	p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
 574	if (unlikely(!p->page))
 575		return -ENOMEM;
 576
 577	p->daddr = dma_map_page_attrs(vm->dma,
 578				      p->page, 0, PAGE_SIZE,
 579				      PCI_DMA_BIDIRECTIONAL,
 580				      DMA_ATTR_SKIP_CPU_SYNC |
 581				      DMA_ATTR_NO_WARN);
 582	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
 583		vm_free_page(vm, p->page);
 584		return -ENOMEM;
 585	}
 586
 587	return 0;
 588}
 589
 590static int setup_page_dma(struct i915_address_space *vm,
 591			  struct i915_page_dma *p)
 592{
 593	return __setup_page_dma(vm, p, __GFP_HIGHMEM);
 594}
 595
 596static void cleanup_page_dma(struct i915_address_space *vm,
 597			     struct i915_page_dma *p)
 598{
 599	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 600	vm_free_page(vm, p->page);
 601}
 602
 603#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
 604
 605static void
 606fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
 607{
 608	kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
 609}
 610
 611#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
 612#define fill32_px(px, v) do {						\
 613	u64 v__ = lower_32_bits(v);					\
 614	fill_px((px), v__ << 32 | v__);					\
 615} while (0)
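/*
 * fill32_px() replicates a 32-bit value into both halves of every 64-bit
 * entry, e.g. fill32_px(pt, 0xdeadbeef) writes 0xdeadbeefdeadbeef into each
 * qword of the page.
 */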
 616
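/*
 * Allocate the (possibly 64K) scratch page; on allocation failure or
 * misalignment the loop below falls back to a single 4K page before giving
 * up with -ENOMEM.
 */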
 617static int
 618setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 619{
 620	unsigned long size;
 621
 622	/*
 623	 * In order to utilize 64K pages for an object with a size < 2M, we will
 624	 * need to support a 64K scratch page, given that every 16th entry for a
 625	 * page-table operating in 64K mode must point to a properly aligned 64K
 626	 * region, including any PTEs which happen to point to scratch.
 627	 *
 628	 * This is only relevant for the 48b PPGTT where we support
 629	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
 630	 * scratch (read-only) between all vm, we create one 64k scratch page
 631	 * for all.
 632	 */
 633	size = I915_GTT_PAGE_SIZE_4K;
 634	if (i915_vm_is_4lvl(vm) &&
 635	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
 636		size = I915_GTT_PAGE_SIZE_64K;
 637		gfp |= __GFP_NOWARN;
 638	}
 639	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
 640
 641	do {
 642		unsigned int order = get_order(size);
 643		struct page *page;
 644		dma_addr_t addr;
 645
 646		page = alloc_pages(gfp, order);
 647		if (unlikely(!page))
 648			goto skip;
 649
 650		addr = dma_map_page_attrs(vm->dma,
 651					  page, 0, size,
 652					  PCI_DMA_BIDIRECTIONAL,
 653					  DMA_ATTR_SKIP_CPU_SYNC |
 654					  DMA_ATTR_NO_WARN);
 655		if (unlikely(dma_mapping_error(vm->dma, addr)))
 656			goto free_page;
 657
 658		if (unlikely(!IS_ALIGNED(addr, size)))
 659			goto unmap_page;
 660
 661		vm->scratch[0].base.page = page;
 662		vm->scratch[0].base.daddr = addr;
 663		vm->scratch_order = order;
 664		return 0;
 665
 666unmap_page:
 667		dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
 668free_page:
 669		__free_pages(page, order);
 670skip:
 671		if (size == I915_GTT_PAGE_SIZE_4K)
 672			return -ENOMEM;
 673
 674		size = I915_GTT_PAGE_SIZE_4K;
 675		gfp &= ~__GFP_NOWARN;
 676	} while (1);
 677}
 678
 679static void cleanup_scratch_page(struct i915_address_space *vm)
 680{
 681	struct i915_page_dma *p = px_base(&vm->scratch[0]);
 682	unsigned int order = vm->scratch_order;
 683
 684	dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
 685		       PCI_DMA_BIDIRECTIONAL);
 686	__free_pages(p->page, order);
 687}
 688
 689static void free_scratch(struct i915_address_space *vm)
 690{
 691	int i;
 692
 693	if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
 694		return;
 695
 696	for (i = 1; i <= vm->top; i++) {
 697		if (!px_dma(&vm->scratch[i]))
 698			break;
 699		cleanup_page_dma(vm, px_base(&vm->scratch[i]));
 700	}
 701
 702	cleanup_scratch_page(vm);
 703}
 704
 705static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
 706{
 707	struct i915_page_table *pt;
 708
 709	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
 710	if (unlikely(!pt))
 711		return ERR_PTR(-ENOMEM);
 712
 713	if (unlikely(setup_page_dma(vm, &pt->base))) {
 714		kfree(pt);
 715		return ERR_PTR(-ENOMEM);
 716	}
 717
 718	atomic_set(&pt->used, 0);
 719	return pt;
 720}
 721
 722static struct i915_page_directory *__alloc_pd(size_t sz)
 723{
 724	struct i915_page_directory *pd;
 725
 726	pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
 727	if (unlikely(!pd))
 728		return NULL;
 729
 730	spin_lock_init(&pd->lock);
 731	return pd;
 732}
 733
 734static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
 735{
 736	struct i915_page_directory *pd;
 737
 738	pd = __alloc_pd(sizeof(*pd));
 739	if (unlikely(!pd))
 740		return ERR_PTR(-ENOMEM);
 741
 742	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
 743		kfree(pd);
 744		return ERR_PTR(-ENOMEM);
 745	}
 746
 747	return pd;
 748}
 749
 750static void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
 751{
 752	cleanup_page_dma(vm, pd);
 753	kfree(pd);
 754}
 755
 756#define free_px(vm, px) free_pd(vm, px_base(px))
 757
 758static inline void
 759write_dma_entry(struct i915_page_dma * const pdma,
 760		const unsigned short idx,
 761		const u64 encoded_entry)
 762{
 763	u64 * const vaddr = kmap_atomic(pdma->page);
 764
 765	vaddr[idx] = encoded_entry;
 766	kunmap_atomic(vaddr);
 767}
 768
 769static inline void
 770__set_pd_entry(struct i915_page_directory * const pd,
 771	       const unsigned short idx,
 772	       struct i915_page_dma * const to,
 773	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
 774{
 775	/* Each thread pre-pins the pd, and we may have a thread per pde. */
 776	GEM_BUG_ON(atomic_read(px_used(pd)) > 2 * ARRAY_SIZE(pd->entry));
 777
 778	atomic_inc(px_used(pd));
 779	pd->entry[idx] = to;
 780	write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
 781}
 782
 783#define set_pd_entry(pd, idx, to) \
 784	__set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)
 785
 786static inline void
 787clear_pd_entry(struct i915_page_directory * const pd,
 788	       const unsigned short idx,
 789	       const struct i915_page_scratch * const scratch)
 790{
 791	GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
 792
 793	write_dma_entry(px_base(pd), idx, scratch->encode);
 794	pd->entry[idx] = NULL;
 795	atomic_dec(px_used(pd));
 796}
 797
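/*
 * Drop a reference on @pt: the lockless atomic_add_unless() covers the
 * common case; only a potentially final reference takes pd->lock so that
 * the entry can be reset to scratch before the caller frees the table.
 */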
 798static bool
 799release_pd_entry(struct i915_page_directory * const pd,
 800		 const unsigned short idx,
 801		 struct i915_page_table * const pt,
 802		 const struct i915_page_scratch * const scratch)
 803{
 804	bool free = false;
 805
 806	if (atomic_add_unless(&pt->used, -1, 1))
 807		return false;
 808
 809	spin_lock(&pd->lock);
 810	if (atomic_dec_and_test(&pt->used)) {
 811		clear_pd_entry(pd, idx, scratch);
 812		free = true;
 813	}
 814	spin_unlock(&pd->lock);
 815
 816	return free;
 817}
 818
 819/*
 820 * PDE TLBs are a pain to invalidate on GEN8+. When we modify
 821 * the page table structures, we mark them dirty so that
 822 * context switching/execlist queuing code takes extra steps
 823 * to ensure that tlbs are flushed.
 824 */
 825static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt)
 826{
 827	ppgtt->pd_dirty_engines = ALL_ENGINES;
 828}
 829
 830static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
 831{
 832	struct drm_i915_private *dev_priv = ppgtt->vm.i915;
 833	enum vgt_g2v_type msg;
 834	int i;
 835
 836	if (create)
 837		atomic_inc(px_used(ppgtt->pd)); /* never remove */
 838	else
 839		atomic_dec(px_used(ppgtt->pd));
 840
 841	mutex_lock(&dev_priv->vgpu.lock);
 842
 843	if (i915_vm_is_4lvl(&ppgtt->vm)) {
 844		const u64 daddr = px_dma(ppgtt->pd);
 845
 846		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
 847		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
 848
 849		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
 850				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
 851	} else {
 852		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
 853			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
 854
 855			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
 856			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
 857		}
 858
 859		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
 860				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
 861	}
 862
 863	/* g2v_notify atomically (via hv trap) consumes the message packet. */
 864	I915_WRITE(vgtif_reg(g2v_notify), msg);
 865
 866	mutex_unlock(&dev_priv->vgpu.lock);
 867}
 868
 869/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
 870#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
 871#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
 872#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
 873#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
 874#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
 875#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
 876#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
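/*
 * Worked example for the macros above: with 4K pages each level resolves
 * ilog2(GEN8_PDES) = 9 bits of index, so __gen8_pte_shift() evaluates to
 * 12/21/30/39 for lvl 0-3, i.e. one PTE covers 4K, one PDE 2M, one PDPE 1G
 * and one PML4E 512G of address space.
 */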
 877
 878static inline unsigned int
 879gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
 880{
 881	const int shift = gen8_pd_shift(lvl);
 882	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
 883
 884	GEM_BUG_ON(start >= end);
 885	end += ~mask >> gen8_pd_shift(1);
 886
 887	*idx = i915_pde_index(start, shift);
 888	if ((start ^ end) & mask)
 889		return GEN8_PDES - *idx;
 890	else
 891		return i915_pde_index(end, shift) - *idx;
 892}
 893
 894static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
 895{
 896	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
 897
 898	GEM_BUG_ON(start >= end);
 899	return (start ^ end) & mask && (start & ~mask) == 0;
 900}
 901
 902static inline unsigned int gen8_pt_count(u64 start, u64 end)
 903{
 904	GEM_BUG_ON(start >= end);
 905	if ((start ^ end) >> gen8_pd_shift(1))
 906		return GEN8_PDES - (start & (GEN8_PDES - 1));
 907	else
 908		return end - start;
 909}
 910
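/*
 * Number of entries in the top level page directory, e.g. 512 PML4 entries
 * for a 48b/4-level vm or 4 PDP entries for a 32b/3-level vm (assuming the
 * ppgtt_size values used by gen8 hardware).
 */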
 911static inline unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
 912{
 913	unsigned int shift = __gen8_pte_shift(vm->top);
 914	return (vm->total + (1ull << shift) - 1) >> shift;
 915}
 916
 917static inline struct i915_page_directory *
 918gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
 919{
 920	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
 921
 922	if (vm->top == 2)
 923		return ppgtt->pd;
 924	else
 925		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
 926}
 927
 928static inline struct i915_page_directory *
 929gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
 930{
 931	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
 932}
 933
 934static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
 935				 struct i915_page_directory *pd,
 936				 int count, int lvl)
 937{
 938	if (lvl) {
 939		void **pde = pd->entry;
 940
 941		do {
 942			if (!*pde)
 943				continue;
 944
 945			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
 946		} while (pde++, --count);
 947	}
 948
 949	free_px(vm, pd);
 950}
 951
 952static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 953{
 954	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 955
 956	if (intel_vgpu_active(vm->i915))
 957		gen8_ppgtt_notify_vgt(ppgtt, false);
 958
 959	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
 960	free_scratch(vm);
 961}
 962
 963static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
 964			      struct i915_page_directory * const pd,
 965			      u64 start, const u64 end, int lvl)
 966{
 967	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
 968	unsigned int idx, len;
 969
 970	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
 971
 972	len = gen8_pd_range(start, end, lvl--, &idx);
 973	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
 974	    __func__, vm, lvl + 1, start, end,
 975	    idx, len, atomic_read(px_used(pd)));
 976	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
 977
 978	do {
 979		struct i915_page_table *pt = pd->entry[idx];
 980
 981		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
 982		    gen8_pd_contains(start, end, lvl)) {
 983			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
 984			    __func__, vm, lvl + 1, idx, start, end);
 985			clear_pd_entry(pd, idx, scratch);
 986			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
 987			start += (u64)I915_PDES << gen8_pd_shift(lvl);
 988			continue;
 989		}
 990
 991		if (lvl) {
 992			start = __gen8_ppgtt_clear(vm, as_pd(pt),
 993						   start, end, lvl);
 994		} else {
 995			unsigned int count;
 996			u64 *vaddr;
 997
 998			count = gen8_pt_count(start, end);
 999			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
1000			    __func__, vm, lvl, start, end,
1001			    gen8_pd_index(start, 0), count,
1002			    atomic_read(&pt->used));
1003			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
1004
1005			vaddr = kmap_atomic_px(pt);
1006			memset64(vaddr + gen8_pd_index(start, 0),
1007				 vm->scratch[0].encode,
1008				 count);
1009			kunmap_atomic(vaddr);
1010
1011			atomic_sub(count, &pt->used);
1012			start += count;
1013		}
1014
1015		if (release_pd_entry(pd, idx, pt, scratch))
1016			free_px(vm, pt);
1017	} while (idx++, --len);
1018
1019	return start;
1020}
1021
1022static void gen8_ppgtt_clear(struct i915_address_space *vm,
1023			     u64 start, u64 length)
1024{
1025	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
1026	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
1027	GEM_BUG_ON(range_overflows(start, length, vm->total));
1028
1029	start >>= GEN8_PTE_SHIFT;
1030	length >>= GEN8_PTE_SHIFT;
1031	GEM_BUG_ON(length == 0);
1032
1033	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
1034			   start, start + length, vm->top);
1035}
1036
1037static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
1038			      struct i915_page_directory * const pd,
1039			      u64 * const start, const u64 end, int lvl)
1040{
1041	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
1042	struct i915_page_table *alloc = NULL;
1043	unsigned int idx, len;
1044	int ret = 0;
1045
1046	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
1047
1048	len = gen8_pd_range(*start, end, lvl--, &idx);
1049	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
1050	    __func__, vm, lvl + 1, *start, end,
1051	    idx, len, atomic_read(px_used(pd)));
1052	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
1053
1054	spin_lock(&pd->lock);
1055	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
1056	do {
1057		struct i915_page_table *pt = pd->entry[idx];
1058
1059		if (!pt) {
1060			spin_unlock(&pd->lock);
1061
1062			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
1063			    __func__, vm, lvl + 1, idx);
1064
1065			pt = fetch_and_zero(&alloc);
1066			if (lvl) {
1067				if (!pt) {
1068					pt = &alloc_pd(vm)->pt;
1069					if (IS_ERR(pt)) {
1070						ret = PTR_ERR(pt);
1071						goto out;
1072					}
1073				}
1074
1075				fill_px(pt, vm->scratch[lvl].encode);
1076			} else {
1077				if (!pt) {
1078					pt = alloc_pt(vm);
1079					if (IS_ERR(pt)) {
1080						ret = PTR_ERR(pt);
1081						goto out;
1082					}
1083				}
1084
1085				if (intel_vgpu_active(vm->i915) ||
1086				    gen8_pt_count(*start, end) < I915_PDES)
1087					fill_px(pt, vm->scratch[lvl].encode);
1088			}
1089
1090			spin_lock(&pd->lock);
1091			if (likely(!pd->entry[idx]))
1092				set_pd_entry(pd, idx, pt);
1093			else
1094				alloc = pt, pt = pd->entry[idx];
1095		}
1096
1097		if (lvl) {
1098			atomic_inc(&pt->used);
1099			spin_unlock(&pd->lock);
1100
1101			ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
1102						 start, end, lvl);
1103			if (unlikely(ret)) {
1104				if (release_pd_entry(pd, idx, pt, scratch))
1105					free_px(vm, pt);
1106				goto out;
1107			}
1108
1109			spin_lock(&pd->lock);
1110			atomic_dec(&pt->used);
1111			GEM_BUG_ON(!atomic_read(&pt->used));
1112		} else {
1113			unsigned int count = gen8_pt_count(*start, end);
1114
1115			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
1116			    __func__, vm, lvl, *start, end,
1117			    gen8_pd_index(*start, 0), count,
1118			    atomic_read(&pt->used));
1119
1120			atomic_add(count, &pt->used);
1121			/* All other pdes may be simultaneously removed */
1122			GEM_BUG_ON(atomic_read(&pt->used) > 2 * I915_PDES);
1123			*start += count;
1124		}
1125	} while (idx++, --len);
1126	spin_unlock(&pd->lock);
1127out:
1128	if (alloc)
1129		free_px(vm, alloc);
1130	return ret;
1131}
1132
1133static int gen8_ppgtt_alloc(struct i915_address_space *vm,
1134			    u64 start, u64 length)
1135{
1136	u64 from;
1137	int err;
1138
1139	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
1140	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
1141	GEM_BUG_ON(range_overflows(start, length, vm->total));
1142
1143	start >>= GEN8_PTE_SHIFT;
1144	length >>= GEN8_PTE_SHIFT;
1145	GEM_BUG_ON(length == 0);
1146	from = start;
1147
1148	err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
1149				 &start, start + length, vm->top);
1150	if (unlikely(err && from != start))
1151		__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
1152				   from, start, vm->top);
1153
1154	return err;
1155}
1156
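/*
 * Small iterator over the backing scatterlist: @dma advances through the
 * current element, and the next element is fetched once @dma reaches @max
 * (see gen8_ppgtt_insert_pte() below).
 */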
1157static inline struct sgt_dma {
1158	struct scatterlist *sg;
1159	dma_addr_t dma, max;
1160} sgt_dma(struct i915_vma *vma) {
1161	struct scatterlist *sg = vma->pages->sgl;
1162	dma_addr_t addr = sg_dma_address(sg);
1163	return (struct sgt_dma) { sg, addr, addr + sg->length };
1164}
1165
1166static __always_inline u64
1167gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
1168		      struct i915_page_directory *pdp,
1169		      struct sgt_dma *iter,
1170		      u64 idx,
1171		      enum i915_cache_level cache_level,
1172		      u32 flags)
1173{
1174	struct i915_page_directory *pd;
1175	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
1176	gen8_pte_t *vaddr;
1177
1178	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
1179	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
1180	do {
1181		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
1182
1183		iter->dma += I915_GTT_PAGE_SIZE;
1184		if (iter->dma >= iter->max) {
1185			iter->sg = __sg_next(iter->sg);
1186			if (!iter->sg) {
1187				idx = 0;
1188				break;
1189			}
1190
1191			iter->dma = sg_dma_address(iter->sg);
1192			iter->max = iter->dma + iter->sg->length;
1193		}
1194
1195		if (gen8_pd_index(++idx, 0) == 0) {
1196			if (gen8_pd_index(idx, 1) == 0) {
1197				/* Limited by sg length for 3lvl */
1198				if (gen8_pd_index(idx, 2) == 0)
1199					break;
1200
1201				pd = pdp->entry[gen8_pd_index(idx, 2)];
1202			}
1203
1204			kunmap_atomic(vaddr);
1205			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
1206		}
1207	} while (1);
1208	kunmap_atomic(vaddr);
1209
1210	return idx;
1211}
1212
1213static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
1214				   struct sgt_dma *iter,
1215				   enum i915_cache_level cache_level,
1216				   u32 flags)
1217{
1218	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
1219	u64 start = vma->node.start;
1220	dma_addr_t rem = iter->sg->length;
1221
1222	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
1223
1224	do {
1225		struct i915_page_directory * const pdp =
1226			gen8_pdp_for_page_address(vma->vm, start);
1227		struct i915_page_directory * const pd =
1228			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
1229		gen8_pte_t encode = pte_encode;
1230		unsigned int maybe_64K = -1;
1231		unsigned int page_size;
1232		gen8_pte_t *vaddr;
1233		u16 index;
1234
1235		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
1236		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
1237		    rem >= I915_GTT_PAGE_SIZE_2M &&
1238		    !__gen8_pte_index(start, 0)) {
1239			index = __gen8_pte_index(start, 1);
1240			encode |= GEN8_PDE_PS_2M;
1241			page_size = I915_GTT_PAGE_SIZE_2M;
1242
1243			vaddr = kmap_atomic_px(pd);
1244		} else {
1245			struct i915_page_table *pt =
1246				i915_pt_entry(pd, __gen8_pte_index(start, 1));
1247
1248			index = __gen8_pte_index(start, 0);
1249			page_size = I915_GTT_PAGE_SIZE;
1250
1251			if (!index &&
1252			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
1253			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1254			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1255			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
1256				maybe_64K = __gen8_pte_index(start, 1);
1257
1258			vaddr = kmap_atomic_px(pt);
1259		}
1260
1261		do {
1262			GEM_BUG_ON(iter->sg->length < page_size);
1263			vaddr[index++] = encode | iter->dma;
1264
1265			start += page_size;
1266			iter->dma += page_size;
1267			rem -= page_size;
1268			if (iter->dma >= iter->max) {
1269				iter->sg = __sg_next(iter->sg);
1270				if (!iter->sg)
1271					break;
1272
1273				rem = iter->sg->length;
1274				iter->dma = sg_dma_address(iter->sg);
1275				iter->max = iter->dma + rem;
1276
1277				if (maybe_64K != -1 && index < I915_PDES &&
1278				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1279				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1280				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
1281					maybe_64K = -1;
1282
1283				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
1284					break;
1285			}
1286		} while (rem >= page_size && index < I915_PDES);
1287
1288		kunmap_atomic(vaddr);
1289
1290		/*
 1291		 * Is it safe to mark the 2M block as 64K? -- Either we have
 1292		 * filled the whole page-table with 64K entries, or filled part
 1293		 * of it and have reached the end of the sg table and we have
 1294		 * enough padding.
1295		 */
1296		if (maybe_64K != -1 &&
1297		    (index == I915_PDES ||
1298		     (i915_vm_has_scratch_64K(vma->vm) &&
1299		      !iter->sg && IS_ALIGNED(vma->node.start +
1300					      vma->node.size,
1301					      I915_GTT_PAGE_SIZE_2M)))) {
1302			vaddr = kmap_atomic_px(pd);
1303			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
1304			kunmap_atomic(vaddr);
1305			page_size = I915_GTT_PAGE_SIZE_64K;
1306
1307			/*
1308			 * We write all 4K page entries, even when using 64K
1309			 * pages. In order to verify that the HW isn't cheating
1310			 * by using the 4K PTE instead of the 64K PTE, we want
1311			 * to remove all the surplus entries. If the HW skipped
1312			 * the 64K PTE, it will read/write into the scratch page
1313			 * instead - which we detect as missing results during
1314			 * selftests.
1315			 */
1316			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
1317				u16 i;
1318
1319				encode = vma->vm->scratch[0].encode;
1320				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
1321
1322				for (i = 1; i < index; i += 16)
1323					memset64(vaddr + i, encode, 15);
1324
1325				kunmap_atomic(vaddr);
1326			}
1327		}
1328
1329		vma->page_sizes.gtt |= page_size;
1330	} while (iter->sg);
1331}
1332
1333static void gen8_ppgtt_insert(struct i915_address_space *vm,
1334			      struct i915_vma *vma,
1335			      enum i915_cache_level cache_level,
1336			      u32 flags)
1337{
1338	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
1339	struct sgt_dma iter = sgt_dma(vma);
1340
1341	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
1342		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
 1343	} else {
1344		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
1345
1346		do {
1347			struct i915_page_directory * const pdp =
1348				gen8_pdp_for_page_index(vm, idx);
1349
1350			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
1351						    cache_level, flags);
1352		} while (idx);
1353
1354		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1355	}
1356}
1357
1358static int gen8_init_scratch(struct i915_address_space *vm)
1359{
1360	int ret;
1361	int i;
1362
 1363	/*
 1364	 * If everybody agrees not to write into the scratch page,
 1365	 * we can reuse it for all vm, keeping contexts and processes separate.
 1366	 */
1367	if (vm->has_read_only &&
1368	    vm->i915->kernel_context &&
1369	    vm->i915->kernel_context->vm) {
1370		struct i915_address_space *clone = vm->i915->kernel_context->vm;
1371
1372		GEM_BUG_ON(!clone->has_read_only);
1373
1374		vm->scratch_order = clone->scratch_order;
1375		memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
1376		px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
1377		return 0;
1378	}
1379
1380	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1381	if (ret)
1382		return ret;
1383
1384	vm->scratch[0].encode =
1385		gen8_pte_encode(px_dma(&vm->scratch[0]),
1386				I915_CACHE_LLC, vm->has_read_only);
1387
1388	for (i = 1; i <= vm->top; i++) {
1389		if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
1390			goto free_scratch;
1391
1392		fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
1393		vm->scratch[i].encode =
1394			gen8_pde_encode(px_dma(&vm->scratch[i]),
1395					I915_CACHE_LLC);
1396	}
1397
1398	return 0;
1399
1400free_scratch:
1401	free_scratch(vm);
1402	return -ENOMEM;
1403}
1404
1405static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
1406{
1407	struct i915_address_space *vm = &ppgtt->vm;
1408	struct i915_page_directory *pd = ppgtt->pd;
1409	unsigned int idx;
1410
1411	GEM_BUG_ON(vm->top != 2);
1412	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
1413
1414	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
1415		struct i915_page_directory *pde;
1416
1417		pde = alloc_pd(vm);
1418		if (IS_ERR(pde))
1419			return PTR_ERR(pde);
1420
1421		fill_px(pde, vm->scratch[1].encode);
1422		set_pd_entry(pd, idx, pde);
1423		atomic_inc(px_used(pde)); /* keep pinned */
1424	}
1425
1426	return 0;
1427}
1428
1429static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
1430{
1431	struct drm_i915_private *i915 = gt->i915;
1432
1433	ppgtt->vm.gt = gt;
1434	ppgtt->vm.i915 = i915;
1435	ppgtt->vm.dma = &i915->drm.pdev->dev;
1436	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
1437
1438	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
1439
1440	ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
1441	ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
1442	ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
1443	ppgtt->vm.vma_ops.clear_pages = clear_pages;
1444}
1445
1446static struct i915_page_directory *
1447gen8_alloc_top_pd(struct i915_address_space *vm)
1448{
1449	const unsigned int count = gen8_pd_top_count(vm);
1450	struct i915_page_directory *pd;
1451
1452	GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
1453
1454	pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
1455	if (unlikely(!pd))
1456		return ERR_PTR(-ENOMEM);
1457
1458	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
1459		kfree(pd);
1460		return ERR_PTR(-ENOMEM);
1461	}
1462
1463	fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
1464	atomic_inc(px_used(pd)); /* mark as pinned */
1465	return pd;
1466}
1467
1468/*
 1469 * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
 1470 * registers, with a net effect resembling a 2-level page table in normal x86
 1471 * terms. Each PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of
 1472 * legacy 32b address space.
1473 *
1474 */
1475static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
1476{
1477	struct i915_ppgtt *ppgtt;
1478	int err;
1479
1480	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1481	if (!ppgtt)
1482		return ERR_PTR(-ENOMEM);
1483
1484	ppgtt_init(ppgtt, &i915->gt);
1485	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
1486
1487	/*
1488	 * From bdw, there is hw support for read-only pages in the PPGTT.
1489	 *
1490	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
1491	 * for now.
1492	 */
1493	ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11;
1494
 1495	/* There are only a few exceptions for gen >= 6: chv and bxt.
 1496	 * And we are not sure about the latter, so play safe for now.
1497	 */
1498	if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
1499		ppgtt->vm.pt_kmap_wc = true;
1500
1501	err = gen8_init_scratch(&ppgtt->vm);
1502	if (err)
1503		goto err_free;
1504
1505	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
1506	if (IS_ERR(ppgtt->pd)) {
1507		err = PTR_ERR(ppgtt->pd);
1508		goto err_free_scratch;
1509	}
1510
1511	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
1512		if (intel_vgpu_active(i915)) {
1513			err = gen8_preallocate_top_level_pdp(ppgtt);
1514			if (err)
1515				goto err_free_pd;
1516		}
1517	}
1518
1519	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
1520	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
1521	ppgtt->vm.clear_range = gen8_ppgtt_clear;
1522
1523	if (intel_vgpu_active(i915))
1524		gen8_ppgtt_notify_vgt(ppgtt, true);
1525
1526	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
1527
1528	return ppgtt;
1529
1530err_free_pd:
1531	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
1532			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
1533err_free_scratch:
1534	free_scratch(&ppgtt->vm);
1535err_free:
1536	kfree(ppgtt);
1537	return ERR_PTR(err);
1538}
1539
1540/* Write pde (index) from the page directory @pd to the page table @pt */
1541static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
1542				  const unsigned int pde,
1543				  const struct i915_page_table *pt)
1544{
1545	/* Caller needs to make sure the write completes if necessary */
1546	iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1547		  ppgtt->pd_addr + pde);
1548}
1549
1550static void gen7_ppgtt_enable(struct intel_gt *gt)
1551{
1552	struct drm_i915_private *i915 = gt->i915;
1553	struct intel_uncore *uncore = gt->uncore;
1554	struct intel_engine_cs *engine;
1555	enum intel_engine_id id;
1556	u32 ecochk;
1557
1558	intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
1559
1560	ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
1561	if (IS_HASWELL(i915)) {
1562		ecochk |= ECOCHK_PPGTT_WB_HSW;
1563	} else {
1564		ecochk |= ECOCHK_PPGTT_LLC_IVB;
1565		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1566	}
1567	intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
1568
1569	for_each_engine(engine, i915, id) {
1570		/* GFX_MODE is per-ring on gen7+ */
1571		ENGINE_WRITE(engine,
1572			     RING_MODE_GEN7,
1573			     _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1574	}
1575}
1576
1577static void gen6_ppgtt_enable(struct intel_gt *gt)
1578{
1579	struct intel_uncore *uncore = gt->uncore;
1580
1581	intel_uncore_rmw(uncore,
1582			 GAC_ECO_BITS,
1583			 0,
1584			 ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
1585
1586	intel_uncore_rmw(uncore,
1587			 GAB_CTL,
1588			 0,
1589			 GAB_CTL_CONT_AFTER_PAGEFAULT);
1590
1591	intel_uncore_rmw(uncore,
1592			 GAM_ECOCHK,
1593			 0,
1594			 ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1595
1596	if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
1597		intel_uncore_write(uncore,
1598				   GFX_MODE,
1599				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1600}
1601
 1602/* PPGTT support for Sandybridge/Gen6 and later */
1603static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1604				   u64 start, u64 length)
1605{
1606	struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1607	const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
1608	const gen6_pte_t scratch_pte = vm->scratch[0].encode;
1609	unsigned int pde = first_entry / GEN6_PTES;
1610	unsigned int pte = first_entry % GEN6_PTES;
1611	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
1612
1613	while (num_entries) {
1614		struct i915_page_table * const pt =
1615			i915_pt_entry(ppgtt->base.pd, pde++);
1616		const unsigned int count = min(num_entries, GEN6_PTES - pte);
1617		gen6_pte_t *vaddr;
1618
1619		GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));
1620
1621		num_entries -= count;
1622
1623		GEM_BUG_ON(count > atomic_read(&pt->used));
1624		if (!atomic_sub_return(count, &pt->used))
1625			ppgtt->scan_for_unused_pt = true;
1626
1627		/*
 1628		 * Note that the hw doesn't support removing PDEs on the fly
1629		 * (they are cached inside the context with no means to
1630		 * invalidate the cache), so we can only reset the PTE
1631		 * entries back to scratch.
1632		 */
1633
1634		vaddr = kmap_atomic_px(pt);
1635		memset32(vaddr + pte, scratch_pte, count);
1636		kunmap_atomic(vaddr);
1637
1638		pte = 0;
1639	}
1640}
1641
1642static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1643				      struct i915_vma *vma,
1644				      enum i915_cache_level cache_level,
1645				      u32 flags)
1646{
1647	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1648	struct i915_page_directory * const pd = ppgtt->pd;
1649	unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
1650	unsigned act_pt = first_entry / GEN6_PTES;
1651	unsigned act_pte = first_entry % GEN6_PTES;
1652	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1653	struct sgt_dma iter = sgt_dma(vma);
1654	gen6_pte_t *vaddr;
1655
1656	GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);
1657
1658	vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
1659	do {
1660		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
1661
1662		iter.dma += I915_GTT_PAGE_SIZE;
1663		if (iter.dma == iter.max) {
1664			iter.sg = __sg_next(iter.sg);
1665			if (!iter.sg)
1666				break;
1667
1668			iter.dma = sg_dma_address(iter.sg);
1669			iter.max = iter.dma + iter.sg->length;
1670		}
1671
1672		if (++act_pte == GEN6_PTES) {
1673			kunmap_atomic(vaddr);
1674			vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
1675			act_pte = 0;
1676		}
1677	} while (1);
1678	kunmap_atomic(vaddr);
1679
1680	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1681}
1682
1683static int gen6_alloc_va_range(struct i915_address_space *vm,
1684			       u64 start, u64 length)
1685{
1686	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1687	struct i915_page_directory * const pd = ppgtt->base.pd;
1688	struct i915_page_table *pt, *alloc = NULL;
1689	intel_wakeref_t wakeref;
1690	u64 from = start;
1691	unsigned int pde;
1692	bool flush = false;
1693	int ret = 0;
1694
1695	wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
1696
1697	spin_lock(&pd->lock);
1698	gen6_for_each_pde(pt, pd, start, length, pde) {
1699		const unsigned int count = gen6_pte_count(start, length);
1700
1701		if (px_base(pt) == px_base(&vm->scratch[1])) {
1702			spin_unlock(&pd->lock);
1703
1704			pt = fetch_and_zero(&alloc);
1705			if (!pt)
1706				pt = alloc_pt(vm);
1707			if (IS_ERR(pt)) {
1708				ret = PTR_ERR(pt);
1709				goto unwind_out;
1710			}
1711
1712			fill32_px(pt, vm->scratch[0].encode);
1713
1714			spin_lock(&pd->lock);
1715			if (pd->entry[pde] == &vm->scratch[1]) {
1716				pd->entry[pde] = pt;
1717				if (i915_vma_is_bound(ppgtt->vma,
1718						      I915_VMA_GLOBAL_BIND)) {
1719					gen6_write_pde(ppgtt, pde, pt);
1720					flush = true;
1721				}
1722			} else {
1723				alloc = pt;
1724				pt = pd->entry[pde];
1725			}
1726		}
1727
1728		atomic_add(count, &pt->used);
1729	}
1730	spin_unlock(&pd->lock);
1731
1732	if (flush) {
1733		mark_tlbs_dirty(&ppgtt->base);
1734		gen6_ggtt_invalidate(vm->gt->ggtt);
1735	}
1736
1737	goto out;
1738
1739unwind_out:
1740	gen6_ppgtt_clear_range(vm, from, start - from);
1741out:
1742	if (alloc)
1743		free_px(vm, alloc);
1744	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
1745	return ret;
1746}
1747
1748static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
1749{
1750	struct i915_address_space * const vm = &ppgtt->base.vm;
1751	struct i915_page_directory * const pd = ppgtt->base.pd;
1752	int ret;
1753
1754	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1755	if (ret)
1756		return ret;
1757
1758	vm->scratch[0].encode =
1759		vm->pte_encode(px_dma(&vm->scratch[0]),
1760			       I915_CACHE_NONE, PTE_READ_ONLY);
1761
1762	if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) {
1763		cleanup_scratch_page(vm);
1764		return -ENOMEM;
1765	}
1766
1767	fill32_px(&vm->scratch[1], vm->scratch[0].encode);
1768	memset_p(pd->entry, &vm->scratch[1], I915_PDES);
1769
1770	return 0;
1771}
1772
1773static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
1774{
1775	struct i915_page_directory * const pd = ppgtt->base.pd;
1776	struct i915_page_dma * const scratch =
1777		px_base(&ppgtt->base.vm.scratch[1]);
1778	struct i915_page_table *pt;
1779	u32 pde;
1780
1781	gen6_for_all_pdes(pt, pd, pde)
1782		if (px_base(pt) != scratch)
1783			free_px(&ppgtt->base.vm, pt);
1784}
1785
1786static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1787{
1788	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1789	struct drm_i915_private *i915 = vm->i915;
1790
1791	/* FIXME remove the struct_mutex to bring the locking under control */
1792	mutex_lock(&i915->drm.struct_mutex);
1793	i915_vma_destroy(ppgtt->vma);
1794	mutex_unlock(&i915->drm.struct_mutex);
1795
1796	gen6_ppgtt_free_pd(ppgtt);
1797	free_scratch(vm);
1798	kfree(ppgtt->base.pd);
1799}
1800
1801static int pd_vma_set_pages(struct i915_vma *vma)
1802{
1803	vma->pages = ERR_PTR(-ENODEV);
1804	return 0;
1805}
1806
1807static void pd_vma_clear_pages(struct i915_vma *vma)
1808{
1809	GEM_BUG_ON(!vma->pages);
1810
1811	vma->pages = NULL;
1812}
1813
1814static int pd_vma_bind(struct i915_vma *vma,
1815		       enum i915_cache_level cache_level,
1816		       u32 unused)
1817{
1818	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
1819	struct gen6_ppgtt *ppgtt = vma->private;
1820	u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
1821	struct i915_page_table *pt;
1822	unsigned int pde;
1823
1824	px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
1825	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
1826
1827	gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
1828		gen6_write_pde(ppgtt, pde, pt);
1829
1830	mark_tlbs_dirty(&ppgtt->base);
1831	gen6_ggtt_invalidate(ggtt);
1832
1833	return 0;
1834}
1835
1836static void pd_vma_unbind(struct i915_vma *vma)
1837{
1838	struct gen6_ppgtt *ppgtt = vma->private;
1839	struct i915_page_directory * const pd = ppgtt->base.pd;
1840	struct i915_page_dma * const scratch =
1841		px_base(&ppgtt->base.vm.scratch[1]);
1842	struct i915_page_table *pt;
1843	unsigned int pde;
1844
1845	if (!ppgtt->scan_for_unused_pt)
1846		return;
1847
1848	/* Free all no longer used page tables */
1849	gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
1850		if (px_base(pt) == scratch || atomic_read(&pt->used))
1851			continue;
1852
1853		free_px(&ppgtt->base.vm, pt);
1854		pd->entry[pde] = scratch;
1855	}
1856
1857	ppgtt->scan_for_unused_pt = false;
1858}
1859
1860static const struct i915_vma_ops pd_vma_ops = {
1861	.set_pages = pd_vma_set_pages,
1862	.clear_pages = pd_vma_clear_pages,
1863	.bind_vma = pd_vma_bind,
1864	.unbind_vma = pd_vma_unbind,
1865};
1866
1867static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
1868{
1869	struct drm_i915_private *i915 = ppgtt->base.vm.i915;
1870	struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
1871	struct i915_vma *vma;
1872
1873	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
1874	GEM_BUG_ON(size > ggtt->vm.total);
1875
1876	vma = i915_vma_alloc();
1877	if (!vma)
1878		return ERR_PTR(-ENOMEM);
1879
1880	i915_active_init(i915, &vma->active, NULL, NULL);
1881
1882	vma->vm = &ggtt->vm;
1883	vma->ops = &pd_vma_ops;
1884	vma->private = ppgtt;
1885
1886	vma->size = size;
1887	vma->fence_size = size;
1888	vma->flags = I915_VMA_GGTT;
1889	vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
1890
1891	INIT_LIST_HEAD(&vma->obj_link);
1892	INIT_LIST_HEAD(&vma->closed_link);
1893
1894	mutex_lock(&vma->vm->mutex);
1895	list_add(&vma->vm_link, &vma->vm->unbound_list);
1896	mutex_unlock(&vma->vm->mutex);
1897
1898	return vma;
1899}
1900
1901int gen6_ppgtt_pin(struct i915_ppgtt *base)
1902{
1903	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
1904	int err;
1905
1906	GEM_BUG_ON(ppgtt->base.vm.closed);
1907
1908	/*
1909	 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
1910	 * which will be pinned into every active context.
1911	 * (When vma->pin_count becomes atomic, I expect we will naturally
1912	 * need a larger, unpacked, type and kill this redundancy.)
1913	 */
1914	if (ppgtt->pin_count++)
1915		return 0;
1916
1917	/*
1918	 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
1919	 * allocator works in address space sizes, so it's multiplied by page
1920	 * size. We allocate at the top of the GTT to avoid fragmentation.
1921	 */
1922	err = i915_vma_pin(ppgtt->vma,
1923			   0, GEN6_PD_ALIGN,
1924			   PIN_GLOBAL | PIN_HIGH);
1925	if (err)
1926		goto unpin;
1927
1928	return 0;
1929
1930unpin:
1931	ppgtt->pin_count = 0;
1932	return err;
1933}
1934
1935void gen6_ppgtt_unpin(struct i915_ppgtt *base)
1936{
1937	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
1938
1939	GEM_BUG_ON(!ppgtt->pin_count);
1940	if (--ppgtt->pin_count)
1941		return;
1942
1943	i915_vma_unpin(ppgtt->vma);
1944}
1945
1946void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
1947{
1948	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
1949
1950	if (!ppgtt->pin_count)
1951		return;
1952
1953	ppgtt->pin_count = 0;
1954	i915_vma_unpin(ppgtt->vma);
1955}
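
/*
 * Illustrative usage sketch only (no such caller lives in this file): the
 * pin_count above acts as a simple recursion counter, so users are expected
 * to pair gen6_ppgtt_pin() with gen6_ppgtt_unpin(), e.g.
 *
 *	err = gen6_ppgtt_pin(base);
 *	if (err)
 *		return err;
 *	... use the ppgtt; its page directory is now resident in the GGTT ...
 *	gen6_ppgtt_unpin(base);
 */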
1956
1957static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
1958{
1959	struct i915_ggtt * const ggtt = &i915->ggtt;
1960	struct gen6_ppgtt *ppgtt;
1961	int err;
1962
1963	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1964	if (!ppgtt)
1965		return ERR_PTR(-ENOMEM);
1966
1967	ppgtt_init(&ppgtt->base, &i915->gt);
1968	ppgtt->base.vm.top = 1;
1969
1970	ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
1971	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
1972	ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
1973	ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
1974
1975	ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
1976
1977	ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
1978	if (!ppgtt->base.pd) {
1979		err = -ENOMEM;
1980		goto err_free;
1981	}
1982
1983	err = gen6_ppgtt_init_scratch(ppgtt);
1984	if (err)
1985		goto err_pd;
1986
1987	ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
1988	if (IS_ERR(ppgtt->vma)) {
1989		err = PTR_ERR(ppgtt->vma);
1990		goto err_scratch;
1991	}
1992
1993	return &ppgtt->base;
1994
1995err_scratch:
1996	free_scratch(&ppgtt->base.vm);
1997err_pd:
1998	kfree(ppgtt->base.pd);
1999err_free:
2000	kfree(ppgtt);
2001	return ERR_PTR(err);
2002}
2003
2004static void gtt_write_workarounds(struct intel_gt *gt)
2005{
2006	struct drm_i915_private *i915 = gt->i915;
2007	struct intel_uncore *uncore = gt->uncore;
2008
2009	/* This function is for GTT-related workarounds. It is called on
2010	 * driver load and after a GPU reset, so workarounds placed here
2011	 * apply even if they get overwritten by a GPU reset.
2012	 */
2013	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
2014	if (IS_BROADWELL(i915))
2015		intel_uncore_write(uncore,
2016				   GEN8_L3_LRA_1_GPGPU,
2017				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
2018	else if (IS_CHERRYVIEW(i915))
2019		intel_uncore_write(uncore,
2020				   GEN8_L3_LRA_1_GPGPU,
2021				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
2022	else if (IS_GEN9_LP(i915))
2023		intel_uncore_write(uncore,
2024				   GEN8_L3_LRA_1_GPGPU,
2025				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2026	else if (INTEL_GEN(i915) >= 9)
2027		intel_uncore_write(uncore,
2028				   GEN8_L3_LRA_1_GPGPU,
2029				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
2030
2031	/*
2032	 * To support 64K PTEs we need to first enable the use of the
2033	 * Intermediate-Page-Size (IPS) bit of the PDE field via some magical
2034	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
2035	 * shouldn't be needed after GEN10.
2036	 *
2037	 * 64K pages were first introduced from BDW+, although technically they
2038	 * only *work* from gen9+. For pre-BDW we instead have the option for
2039	 * 32K pages, but we don't currently have any support for it in our
2040	 * driver.
2041	 */
2042	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
2043	    INTEL_GEN(i915) <= 10)
2044		intel_uncore_rmw(uncore,
2045				 GEN8_GAMW_ECO_DEV_RW_IA,
2046				 0,
2047				 GAMW_ECO_ENABLE_64K_IPS_FIELD);
2048
2049	if (IS_GEN_RANGE(i915, 8, 11)) {
2050		bool can_use_gtt_cache = true;
2051
2052		/*
2053		 * According to the BSpec, if we use 2M/1G pages then we also
2054		 * need to disable the GTT cache. At least on BDW we can see
2055		 * visual corruption when using 2M pages without disabling the
2056		 * GTT cache.
2057		 */
2058		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
2059			can_use_gtt_cache = false;
2060
2061		/* WaGttCachingOffByDefault */
2062		intel_uncore_write(uncore,
2063				   HSW_GTT_CACHE_EN,
2064				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
2065		WARN_ON_ONCE(can_use_gtt_cache &&
2066			     intel_uncore_read(uncore,
2067					       HSW_GTT_CACHE_EN) == 0);
2068	}
2069}
2070
2071int i915_ppgtt_init_hw(struct intel_gt *gt)
2072{
2073	struct drm_i915_private *i915 = gt->i915;
2074
2075	gtt_write_workarounds(gt);
2076
2077	if (IS_GEN(i915, 6))
2078		gen6_ppgtt_enable(gt);
2079	else if (IS_GEN(i915, 7))
2080		gen7_ppgtt_enable(gt);
2081
2082	return 0;
2083}
2084
2085static struct i915_ppgtt *
2086__ppgtt_create(struct drm_i915_private *i915)
2087{
2088	if (INTEL_GEN(i915) < 8)
2089		return gen6_ppgtt_create(i915);
2090	else
2091		return gen8_ppgtt_create(i915);
2092}
2093
2094struct i915_ppgtt *
2095i915_ppgtt_create(struct drm_i915_private *i915)
2096{
2097	struct i915_ppgtt *ppgtt;
2098
2099	ppgtt = __ppgtt_create(i915);
2100	if (IS_ERR(ppgtt))
2101		return ppgtt;
2102
2103	trace_i915_ppgtt_create(&ppgtt->vm);
2104
2105	return ppgtt;
2106}
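
/*
 * Illustrative sketch only: i915_ppgtt_create() reports failure via an
 * ERR_PTR() rather than NULL, so a hypothetical caller checks it the same
 * way init_aliasing_ppgtt() below does:
 *
 *	struct i915_ppgtt *ppgtt = i915_ppgtt_create(i915);
 *
 *	if (IS_ERR(ppgtt))
 *		return PTR_ERR(ppgtt);
 *	... use ppgtt->vm ...
 *	i915_vm_put(&ppgtt->vm);
 */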
2107
2108/* Certain Gen5 chipsets require idling the GPU before
2109 * unmapping anything from the GTT when VT-d is enabled.
2110 */
2111static bool needs_idle_maps(struct drm_i915_private *dev_priv)
2112{
2113	/* Query intel_iommu to see if we need the workaround. Presumably that
2114	 * was loaded first.
2115	 */
2116	return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
2117}
2118
2119static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
2120{
2121	struct drm_i915_private *i915 = ggtt->vm.i915;
2122
2123	/* Don't bother messing with faults pre GEN6 as we have little
2124	 * documentation supporting that it's a good idea.
2125	 */
2126	if (INTEL_GEN(i915) < 6)
2127		return;
2128
2129	intel_gt_check_and_clear_faults(ggtt->vm.gt);
2130
2131	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
2132
2133	ggtt->invalidate(ggtt);
2134}
2135
2136void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
2137{
2138	ggtt_suspend_mappings(&i915->ggtt);
2139}
2140
2141int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2142			       struct sg_table *pages)
2143{
2144	do {
2145		if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
2146				     pages->sgl, pages->nents,
2147				     PCI_DMA_BIDIRECTIONAL,
2148				     DMA_ATTR_NO_WARN))
2149			return 0;
2150
2151		/*
2152		 * If the DMA remap fails, one cause can be that we have
2153		 * too many objects pinned in a small remapping table,
2154		 * such as swiotlb. Incrementally purge all other objects and
2155		 * try again - if there are no more pages to remove from
2156		 * the DMA remapper, i915_gem_shrink will return 0.
2157		 */
2158		GEM_BUG_ON(obj->mm.pages == pages);
2159	} while (i915_gem_shrink(to_i915(obj->base.dev),
2160				 obj->base.size >> PAGE_SHIFT, NULL,
2161				 I915_SHRINK_BOUND |
2162				 I915_SHRINK_UNBOUND));
2163
2164	return -ENOSPC;
2165}
2166
2167static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2168{
2169	writeq(pte, addr);
2170}
2171
2172static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2173				  dma_addr_t addr,
2174				  u64 offset,
2175				  enum i915_cache_level level,
2176				  u32 unused)
2177{
2178	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2179	gen8_pte_t __iomem *pte =
2180		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
2181
2182	gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
2183
2184	ggtt->invalidate(ggtt);
2185}
2186
2187static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2188				     struct i915_vma *vma,
2189				     enum i915_cache_level level,
2190				     u32 flags)
2191{
2192	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2193	struct sgt_iter sgt_iter;
2194	gen8_pte_t __iomem *gtt_entries;
2195	const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
2196	dma_addr_t addr;
2197
2198	/*
2199	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
2200	 * not to allow the user to override access to a read only page.
2201	 */
2202
2203	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
2204	gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
2205	for_each_sgt_dma(addr, sgt_iter, vma->pages)
2206		gen8_set_pte(gtt_entries++, pte_encode | addr);
2207
2208	/*
2209	 * We want to flush the TLBs only after we're certain all the PTE
2210	 * updates have finished.
2211	 */
2212	ggtt->invalidate(ggtt);
2213}
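
/*
 * For illustration: both gen8 insert paths above index the PTE array
 * linearly, one entry per 4K GGTT page. A vma bound at node.start == 64K,
 * for example, starts at entry 64K / I915_GTT_PAGE_SIZE == 16, i.e. at byte
 * offset 16 * sizeof(gen8_pte_t) == 128 within the gsm mapping.
 */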
2214
2215static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2216				  dma_addr_t addr,
2217				  u64 offset,
2218				  enum i915_cache_level level,
2219				  u32 flags)
2220{
2221	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2222	gen6_pte_t __iomem *pte =
2223		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
2224
2225	iowrite32(vm->pte_encode(addr, level, flags), pte);
2226
2227	ggtt->invalidate(ggtt);
2228}
2229
2230/*
2231 * Binds an object into the global GTT with the specified cache level. The object
2232 * will be accessible to the GPU via commands whose operands reference offsets
2233 * within the global GTT as well as accessible by the GPU through the GMADR
2234 * mapped BAR (dev_priv->mm.gtt->gtt).
2235 */
2236static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2237				     struct i915_vma *vma,
2238				     enum i915_cache_level level,
2239				     u32 flags)
2240{
2241	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2242	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
2243	unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
2244	struct sgt_iter iter;
2245	dma_addr_t addr;
2246	for_each_sgt_dma(addr, iter, vma->pages)
2247		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2248
2249	/*
2250	 * We want to flush the TLBs only after we're certain all the PTE
2251	 * updates have finished.
2252	 */
2253	ggtt->invalidate(ggtt);
2254}
2255
2256static void nop_clear_range(struct i915_address_space *vm,
2257			    u64 start, u64 length)
2258{
2259}
2260
2261static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2262				  u64 start, u64 length)
2263{
2264	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2265	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2266	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2267	const gen8_pte_t scratch_pte = vm->scratch[0].encode;
2268	gen8_pte_t __iomem *gtt_base =
2269		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2270	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2271	int i;
2272
2273	if (WARN(num_entries > max_entries,
2274		 "First entry = %d; Num entries = %d (max=%d)\n",
2275		 first_entry, num_entries, max_entries))
2276		num_entries = max_entries;
2277
2278	for (i = 0; i < num_entries; i++)
2279		gen8_set_pte(&gtt_base[i], scratch_pte);
2280}
2281
2282static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2283{
2284	struct drm_i915_private *dev_priv = vm->i915;
2285
2286	/*
2287	 * Make sure the internal GAM fifo has been cleared of all GTT
2288	 * writes before exiting stop_machine(). This guarantees that
2289	 * any aperture accesses waiting to start in another process
2290	 * cannot back up behind the GTT writes causing a hang.
2291	 * The register can be any arbitrary GAM register.
2292	 */
2293	POSTING_READ(GFX_FLSH_CNTL_GEN6);
2294}
2295
2296struct insert_page {
2297	struct i915_address_space *vm;
2298	dma_addr_t addr;
2299	u64 offset;
2300	enum i915_cache_level level;
2301};
2302
2303static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2304{
2305	struct insert_page *arg = _arg;
2306
2307	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2308	bxt_vtd_ggtt_wa(arg->vm);
2309
2310	return 0;
2311}
2312
2313static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2314					  dma_addr_t addr,
2315					  u64 offset,
2316					  enum i915_cache_level level,
2317					  u32 unused)
2318{
2319	struct insert_page arg = { vm, addr, offset, level };
2320
2321	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2322}
2323
2324struct insert_entries {
2325	struct i915_address_space *vm;
2326	struct i915_vma *vma;
2327	enum i915_cache_level level;
2328	u32 flags;
2329};
2330
2331static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2332{
2333	struct insert_entries *arg = _arg;
2334
2335	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
2336	bxt_vtd_ggtt_wa(arg->vm);
2337
2338	return 0;
2339}
2340
2341static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2342					     struct i915_vma *vma,
2343					     enum i915_cache_level level,
2344					     u32 flags)
2345{
2346	struct insert_entries arg = { vm, vma, level, flags };
2347
2348	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2349}
2350
2351struct clear_range {
2352	struct i915_address_space *vm;
2353	u64 start;
2354	u64 length;
2355};
2356
2357static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2358{
2359	struct clear_range *arg = _arg;
2360
2361	gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2362	bxt_vtd_ggtt_wa(arg->vm);
2363
2364	return 0;
2365}
2366
2367static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2368					  u64 start,
2369					  u64 length)
2370{
2371	struct clear_range arg = { vm, start, length };
2372
2373	stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2374}
2375
2376static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2377				  u64 start, u64 length)
2378{
2379	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2380	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2381	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2382	gen6_pte_t scratch_pte, __iomem *gtt_base =
2383		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2384	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2385	int i;
2386
2387	if (WARN(num_entries > max_entries,
2388		 "First entry = %d; Num entries = %d (max=%d)\n",
2389		 first_entry, num_entries, max_entries))
2390		num_entries = max_entries;
2391
2392	scratch_pte = vm->scratch[0].encode;
2393	for (i = 0; i < num_entries; i++)
2394		iowrite32(scratch_pte, &gtt_base[i]);
2395}
2396
2397static void i915_ggtt_insert_page(struct i915_address_space *vm,
2398				  dma_addr_t addr,
2399				  u64 offset,
2400				  enum i915_cache_level cache_level,
2401				  u32 unused)
2402{
2403	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2404		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2405
2406	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
2407}
2408
2409static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2410				     struct i915_vma *vma,
2411				     enum i915_cache_level cache_level,
2412				     u32 unused)
2413{
2414	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2415		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2416
2417	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2418				    flags);
2419}
2420
2421static void i915_ggtt_clear_range(struct i915_address_space *vm,
2422				  u64 start, u64 length)
2423{
2424	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
2425}
2426
2427static int ggtt_bind_vma(struct i915_vma *vma,
2428			 enum i915_cache_level cache_level,
2429			 u32 flags)
2430{
2431	struct drm_i915_private *i915 = vma->vm->i915;
2432	struct drm_i915_gem_object *obj = vma->obj;
2433	intel_wakeref_t wakeref;
2434	u32 pte_flags;
2435
2436	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
2437	pte_flags = 0;
2438	if (i915_gem_object_is_readonly(obj))
2439		pte_flags |= PTE_READ_ONLY;
2440
2441	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2442		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
2443
2444	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
2445
2446	/*
2447	 * Without aliasing PPGTT there's no difference between
2448	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2449	 * upgrade to both bound if we bind either to avoid double-binding.
2450	 */
2451	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
2452
2453	return 0;
2454}
2455
2456static void ggtt_unbind_vma(struct i915_vma *vma)
2457{
2458	struct drm_i915_private *i915 = vma->vm->i915;
2459	intel_wakeref_t wakeref;
2460
2461	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2462		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2463}
2464
2465static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2466				 enum i915_cache_level cache_level,
2467				 u32 flags)
2468{
2469	struct drm_i915_private *i915 = vma->vm->i915;
2470	u32 pte_flags;
2471	int ret;
2472
2473	/* Currently applicable only to VLV */
2474	pte_flags = 0;
2475	if (i915_gem_object_is_readonly(vma->obj))
2476		pte_flags |= PTE_READ_ONLY;
2477
2478	if (flags & I915_VMA_LOCAL_BIND) {
2479		struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
2480
2481		if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
2482			ret = alias->vm.allocate_va_range(&alias->vm,
2483							  vma->node.start,
2484							  vma->size);
2485			if (ret)
2486				return ret;
2487		}
2488
2489		alias->vm.insert_entries(&alias->vm, vma,
2490					 cache_level, pte_flags);
2491	}
2492
2493	if (flags & I915_VMA_GLOBAL_BIND) {
2494		intel_wakeref_t wakeref;
2495
2496		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
2497			vma->vm->insert_entries(vma->vm, vma,
2498						cache_level, pte_flags);
2499		}
2500	}
2501
2502	return 0;
2503}
2504
2505static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
2506{
2507	struct drm_i915_private *i915 = vma->vm->i915;
2508
2509	if (vma->flags & I915_VMA_GLOBAL_BIND) {
2510		struct i915_address_space *vm = vma->vm;
2511		intel_wakeref_t wakeref;
2512
2513		with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2514			vm->clear_range(vm, vma->node.start, vma->size);
2515	}
2516
2517	if (vma->flags & I915_VMA_LOCAL_BIND) {
2518		struct i915_address_space *vm =
2519			&i915_vm_to_ggtt(vma->vm)->alias->vm;
2520
2521		vm->clear_range(vm, vma->node.start, vma->size);
2522	}
2523}
2524
2525void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2526			       struct sg_table *pages)
2527{
2528	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2529	struct device *kdev = &dev_priv->drm.pdev->dev;
2530	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2531
2532	if (unlikely(ggtt->do_idle_maps)) {
2533		if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
2534			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2535			/* Wait a bit, in hopes it avoids the hang */
2536			udelay(10);
2537		}
2538	}
2539
2540	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
2541}
2542
2543static int ggtt_set_pages(struct i915_vma *vma)
2544{
2545	int ret;
2546
2547	GEM_BUG_ON(vma->pages);
2548
2549	ret = i915_get_ggtt_vma_pages(vma);
2550	if (ret)
2551		return ret;
2552
2553	vma->page_sizes = vma->obj->mm.page_sizes;
2554
2555	return 0;
2556}
2557
2558static void i915_gtt_color_adjust(const struct drm_mm_node *node,
2559				  unsigned long color,
2560				  u64 *start,
2561				  u64 *end)
2562{
2563	if (node->allocated && node->color != color)
2564		*start += I915_GTT_PAGE_SIZE;
2565
2566	/* Also leave a space between the unallocated reserved node after the
2567	 * GTT and any objects within the GTT, i.e. we use the color adjustment
2568	 * to insert a guard page to prevent prefetches crossing over the
2569	 * GTT boundary.
2570	 */
2571	node = list_next_entry(node, node_list);
2572	if (node->color != color)
2573		*end -= I915_GTT_PAGE_SIZE;
2574}
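
/*
 * Worked example (illustrative): if the node preceding a hole carries a
 * different colour, *start is bumped by one page, and if the node following
 * the hole differs as well, *end shrinks by another page. A colour-mismatched
 * object of size S therefore needs a hole of up to
 * S + 2 * I915_GTT_PAGE_SIZE so the guard pages fit on both sides.
 */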
2575
2576static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
2577{
2578	struct i915_ppgtt *ppgtt;
2579	int err;
2580
2581	ppgtt = i915_ppgtt_create(ggtt->vm.i915);
2582	if (IS_ERR(ppgtt))
2583		return PTR_ERR(ppgtt);
2584
2585	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
2586		err = -ENODEV;
2587		goto err_ppgtt;
2588	}
2589
2590	/*
2591	 * Note we only pre-allocate as far as the end of the global
2592	 * GTT. On 48b / 4-level page-tables, the difference is very,
2593	 * very significant! We have to preallocate as GVT/vgpu does
2594	 * not like the page directory disappearing.
2595	 */
2596	err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
2597	if (err)
2598		goto err_ppgtt;
2599
2600	ggtt->alias = ppgtt;
2601
2602	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
2603	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
2604
2605	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
2606	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
2607
2608	return 0;
2609
2610err_ppgtt:
2611	i915_vm_put(&ppgtt->vm);
2612	return err;
2613}
2614
2615static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
2616{
2617	struct drm_i915_private *i915 = ggtt->vm.i915;
2618	struct i915_ppgtt *ppgtt;
2619
2620	mutex_lock(&i915->drm.struct_mutex);
2621
2622	ppgtt = fetch_and_zero(&ggtt->alias);
2623	if (!ppgtt)
2624		goto out;
2625
2626	i915_vm_put(&ppgtt->vm);
2627
2628	ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
2629	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
2630
2631out:
2632	mutex_unlock(&i915->drm.struct_mutex);
2633}
2634
2635static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
2636{
2637	u64 size;
2638	int ret;
2639
2640	if (!USES_GUC(ggtt->vm.i915))
2641		return 0;
2642
2643	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
2644	size = ggtt->vm.total - GUC_GGTT_TOP;
2645
2646	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
2647				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
2648				   PIN_NOEVICT);
2649	if (ret)
2650		DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
2651
2652	return ret;
2653}
2654
2655static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
2656{
2657	if (drm_mm_node_allocated(&ggtt->uc_fw))
2658		drm_mm_remove_node(&ggtt->uc_fw);
2659}
2660
2661static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
2662{
2663	ggtt_release_guc_top(ggtt);
2664	drm_mm_remove_node(&ggtt->error_capture);
2665}
2666
2667static int init_ggtt(struct i915_ggtt *ggtt)
2668{
2669	/* Let GEM manage all of the aperture.
2670	 *
2671	 * However, leave one page at the end still bound to the scratch page.
2672	 * There are a number of places where the hardware apparently prefetches
2673	 * past the end of the object, and we've seen multiple hangs with the
2674	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2675	 * aperture.  One page should be enough to keep any prefetching inside
2676	 * of the aperture.
2677	 */
2678	unsigned long hole_start, hole_end;
2679	struct drm_mm_node *entry;
2680	int ret;
2681
2682	/*
2683	 * GuC requires all resources that we're sharing with it to be placed in
2684	 * non-WOPCM memory. If GuC is not present or not in use we still need a
2685	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
2686	 * why.
2687	 */
2688	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
2689			       intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
2690
2691	ret = intel_vgt_balloon(ggtt);
2692	if (ret)
2693		return ret;
2694
2695	/* Reserve a mappable slot for our lockless error capture */
2696	ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
2697					  PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2698					  0, ggtt->mappable_end,
2699					  DRM_MM_INSERT_LOW);
2700	if (ret)
2701		return ret;
2702
2703	/*
2704	 * The upper portion of the GuC address space has a sizeable hole
2705	 * (several MB) that is inaccessible by GuC. Reserve this range within
2706	 * GGTT as it can comfortably hold GuC/HuC firmware images.
2707	 */
2708	ret = ggtt_reserve_guc_top(ggtt);
2709	if (ret)
2710		goto err;
2711
2712	/* Clear any non-preallocated blocks */
2713	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
2714		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2715			      hole_start, hole_end);
2716		ggtt->vm.clear_range(&ggtt->vm, hole_start,
2717				     hole_end - hole_start);
2718	}
2719
2720	/* And finally clear the reserved guard page */
2721	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
2722
2723	return 0;
2724
2725err:
2726	cleanup_init_ggtt(ggtt);
2727	return ret;
2728}
2729
2730int i915_init_ggtt(struct drm_i915_private *i915)
2731{
2732	int ret;
2733
2734	ret = init_ggtt(&i915->ggtt);
2735	if (ret)
2736		return ret;
2737
2738	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
2739		ret = init_aliasing_ppgtt(&i915->ggtt);
2740		if (ret)
2741			cleanup_init_ggtt(&i915->ggtt);
2742	}
2743
2744	return 0;
2745}
2746
2747static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
2748{
2749	struct drm_i915_private *i915 = ggtt->vm.i915;
2750	struct i915_vma *vma, *vn;
2751
2752	ggtt->vm.closed = true;
2753
2754	rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
2755	flush_workqueue(i915->wq);
2756
2757	mutex_lock(&i915->drm.struct_mutex);
2758
2759	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
2760		WARN_ON(i915_vma_unbind(vma));
2761
2762	if (drm_mm_node_allocated(&ggtt->error_capture))
2763		drm_mm_remove_node(&ggtt->error_capture);
2764
2765	ggtt_release_guc_top(ggtt);
2766
2767	if (drm_mm_initialized(&ggtt->vm.mm)) {
2768		intel_vgt_deballoon(ggtt);
2769		i915_address_space_fini(&ggtt->vm);
2770	}
2771
2772	ggtt->vm.cleanup(&ggtt->vm);
2773
2774	mutex_unlock(&i915->drm.struct_mutex);
2775
2776	arch_phys_wc_del(ggtt->mtrr);
2777	io_mapping_fini(&ggtt->iomap);
2778}
2779
2780/**
2781 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
2782 * @i915: i915 device
2783 */
2784void i915_ggtt_driver_release(struct drm_i915_private *i915)
2785{
2786	struct pagevec *pvec;
2787
2788	fini_aliasing_ppgtt(&i915->ggtt);
2789
2790	ggtt_cleanup_hw(&i915->ggtt);
2791
2792	pvec = &i915->mm.wc_stash.pvec;
2793	if (pvec->nr) {
2794		set_pages_array_wb(pvec->pages, pvec->nr);
2795		__pagevec_release(pvec);
2796	}
2797
2798	i915_gem_cleanup_stolen(i915);
2799}
2800
2801static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2802{
2803	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2804	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2805	return snb_gmch_ctl << 20;
2806}
2807
2808static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2809{
2810	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2811	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2812	if (bdw_gmch_ctl)
2813		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2814
2815#ifdef CONFIG_X86_32
2816	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
2817	if (bdw_gmch_ctl > 4)
2818		bdw_gmch_ctl = 4;
2819#endif
2820
2821	return bdw_gmch_ctl << 20;
2822}
2823
2824static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
2825{
2826	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2827	gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2828
2829	if (gmch_ctrl)
2830		return 1 << (20 + gmch_ctrl);
2831
2832	return 0;
2833}
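
/*
 * Worked arithmetic example (illustrative, not driver logic): on gen8 the
 * GGMS field encodes the PTE space as a power of two in MiB, so a raw field
 * value of 3 becomes 1 << 3 == 8 and gen8_get_total_gtt_size() returns
 * 8 << 20 == 8M of PTEs, which gen8_gmch_probe() below converts into
 * (8M / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE == 4G of GGTT.
 */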
2834
2835static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
2836{
2837	struct drm_i915_private *dev_priv = ggtt->vm.i915;
2838	struct pci_dev *pdev = dev_priv->drm.pdev;
2839	phys_addr_t phys_addr;
2840	int ret;
2841
2842	/* For Modern GENs the PTEs and register space are split in the BAR */
2843	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
2844
2845	/*
2846	 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
2847	 * will be dropped. For WC mappings in general we have 64 byte burst
2848	 * writes when the WC buffer is flushed, so we can't use it, but have to
2849	 * resort to an uncached mapping. The WC issue is easily caught by the
2850	 * readback check when writing GTT PTE entries.
2851	 */
2852	if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
2853		ggtt->gsm = ioremap_nocache(phys_addr, size);
2854	else
2855		ggtt->gsm = ioremap_wc(phys_addr, size);
2856	if (!ggtt->gsm) {
2857		DRM_ERROR("Failed to map the ggtt page table\n");
2858		return -ENOMEM;
2859	}
2860
2861	ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
2862	if (ret) {
2863		DRM_ERROR("Scratch setup failed\n");
2864		/* iounmap will also get called at remove, but meh */
2865		iounmap(ggtt->gsm);
2866		return ret;
2867	}
2868
2869	ggtt->vm.scratch[0].encode =
2870		ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
2871				    I915_CACHE_NONE, 0);
2872
2873	return 0;
2874}
2875
2876static void tgl_setup_private_ppat(struct drm_i915_private *dev_priv)
2877{
2878	/* TGL doesn't support LLC or AGE settings */
2879	I915_WRITE(GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
2880	I915_WRITE(GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
2881	I915_WRITE(GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
2882	I915_WRITE(GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
2883	I915_WRITE(GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
2884	I915_WRITE(GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
2885	I915_WRITE(GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
2886	I915_WRITE(GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
2887}
2888
2889static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
2890{
2891	I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
2892	I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
2893	I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
2894	I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
2895	I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
2896	I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
2897	I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
2898	I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2899}
2900
2901/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
2902 * bits. When using advanced contexts each context stores its own PAT, but
2903 * writing this data shouldn't be harmful even in those cases. */
2904static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
2905{
2906	u64 pat;
2907
2908	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
2909	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
2910	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |	/* for scanout with eLLC */
2911	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
2912	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
2913	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
2914	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
2915	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2916
2917	I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
2918	I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
2919}
2920
2921static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
2922{
2923	u64 pat;
2924
2925	/*
2926	 * Map WB on BDW to snooped on CHV.
2927	 *
2928	 * Only the snoop bit has meaning for CHV, the rest is
2929	 * ignored.
2930	 *
2931	 * The hardware will never snoop for certain types of accesses:
2932	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
2933	 * - PPGTT page tables
2934	 * - some other special cycles
2935	 *
2936	 * As with BDW, we also need to consider the following for GT accesses:
2937	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
2938	 * so RTL will always use the value corresponding to
2939	 * pat_sel = 000".
2940	 * Which means we must set the snoop bit in PAT entry 0
2941	 * in order to keep the global status page working.
2942	 */
2943
2944	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
2945	      GEN8_PPAT(1, 0) |
2946	      GEN8_PPAT(2, 0) |
2947	      GEN8_PPAT(3, 0) |
2948	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
2949	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
2950	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
2951	      GEN8_PPAT(7, CHV_PPAT_SNOOP);
2952
2953	I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
2954	I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
2955}
2956
2957static void gen6_gmch_remove(struct i915_address_space *vm)
2958{
2959	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2960
2961	iounmap(ggtt->gsm);
2962	cleanup_scratch_page(vm);
2963}
2964
2965static void setup_private_pat(struct drm_i915_private *dev_priv)
2966{
2967	GEM_BUG_ON(INTEL_GEN(dev_priv) < 8);
2968
2969	if (INTEL_GEN(dev_priv) >= 12)
2970		tgl_setup_private_ppat(dev_priv);
2971	else if (INTEL_GEN(dev_priv) >= 10)
2972		cnl_setup_private_ppat(dev_priv);
2973	else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
2974		chv_setup_private_ppat(dev_priv);
2975	else
2976		bdw_setup_private_ppat(dev_priv);
2977}
2978
2979static int gen8_gmch_probe(struct i915_ggtt *ggtt)
2980{
2981	struct drm_i915_private *dev_priv = ggtt->vm.i915;
2982	struct pci_dev *pdev = dev_priv->drm.pdev;
2983	unsigned int size;
2984	u16 snb_gmch_ctl;
2985	int err;
2986
2987	/* TODO: We're not aware of mappable constraints on gen8 yet */
2988	ggtt->gmadr =
2989		(struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
2990						 pci_resource_len(pdev, 2));
2991	ggtt->mappable_end = resource_size(&ggtt->gmadr);
2992
2993	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
2994	if (!err)
2995		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
2996	if (err)
2997		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
2998
2999	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3000	if (IS_CHERRYVIEW(dev_priv))
3001		size = chv_get_total_gtt_size(snb_gmch_ctl);
3002	else
3003		size = gen8_get_total_gtt_size(snb_gmch_ctl);
3004
3005	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
3006	ggtt->vm.cleanup = gen6_gmch_remove;
3007	ggtt->vm.insert_page = gen8_ggtt_insert_page;
3008	ggtt->vm.clear_range = nop_clear_range;
3009	if (intel_scanout_needs_vtd_wa(dev_priv))
3010		ggtt->vm.clear_range = gen8_ggtt_clear_range;
3011
3012	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
3013
3014	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
3015	if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
3016	    IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
3017		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
3018		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
3019		if (ggtt->vm.clear_range != nop_clear_range)
3020			ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3021	}
3022
3023	ggtt->invalidate = gen6_ggtt_invalidate;
3024
3025	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3026	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3027	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3028	ggtt->vm.vma_ops.clear_pages = clear_pages;
3029
3030	ggtt->vm.pte_encode = gen8_pte_encode;
3031
3032	setup_private_pat(dev_priv);
3033
3034	return ggtt_probe_common(ggtt, size);
3035}
3036
3037static int gen6_gmch_probe(struct i915_ggtt *ggtt)
3038{
3039	struct drm_i915_private *dev_priv = ggtt->vm.i915;
3040	struct pci_dev *pdev = dev_priv->drm.pdev;
3041	unsigned int size;
3042	u16 snb_gmch_ctl;
3043	int err;
3044
3045	ggtt->gmadr =
3046		(struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3047						 pci_resource_len(pdev, 2));
3048	ggtt->mappable_end = resource_size(&ggtt->gmadr);
3049
3050	/* 64/512MB is the current min/max we actually know of, but this is just
3051	 * a coarse sanity check.
3052	 */
3053	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
3054		DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
3055		return -ENXIO;
3056	}
3057
3058	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
3059	if (!err)
3060		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
3061	if (err)
3062		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3063	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3064
3065	size = gen6_get_total_gtt_size(snb_gmch_ctl);
3066	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
3067
3068	ggtt->vm.clear_range = nop_clear_range;
3069	if (!HAS_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
3070		ggtt->vm.clear_range = gen6_ggtt_clear_range;
3071	ggtt->vm.insert_page = gen6_ggtt_insert_page;
3072	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
3073	ggtt->vm.cleanup = gen6_gmch_remove;
3074
3075	ggtt->invalidate = gen6_ggtt_invalidate;
3076
3077	if (HAS_EDRAM(dev_priv))
3078		ggtt->vm.pte_encode = iris_pte_encode;
3079	else if (IS_HASWELL(dev_priv))
3080		ggtt->vm.pte_encode = hsw_pte_encode;
3081	else if (IS_VALLEYVIEW(dev_priv))
3082		ggtt->vm.pte_encode = byt_pte_encode;
3083	else if (INTEL_GEN(dev_priv) >= 7)
3084		ggtt->vm.pte_encode = ivb_pte_encode;
3085	else
3086		ggtt->vm.pte_encode = snb_pte_encode;
3087
3088	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3089	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3090	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3091	ggtt->vm.vma_ops.clear_pages = clear_pages;
3092
3093	return ggtt_probe_common(ggtt, size);
3094}
3095
3096static void i915_gmch_remove(struct i915_address_space *vm)
3097{
3098	intel_gmch_remove();
3099}
3100
3101static int i915_gmch_probe(struct i915_ggtt *ggtt)
3102{
3103	struct drm_i915_private *dev_priv = ggtt->vm.i915;
3104	phys_addr_t gmadr_base;
3105	int ret;
3106
3107	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
3108	if (!ret) {
3109		DRM_ERROR("failed to set up gmch\n");
3110		return -EIO;
3111	}
3112
3113	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
3114
3115	ggtt->gmadr =
3116		(struct resource) DEFINE_RES_MEM(gmadr_base,
3117						 ggtt->mappable_end);
3118
3119	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
3120	ggtt->vm.insert_page = i915_ggtt_insert_page;
3121	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
3122	ggtt->vm.clear_range = i915_ggtt_clear_range;
3123	ggtt->vm.cleanup = i915_gmch_remove;
3124
3125	ggtt->invalidate = gmch_ggtt_invalidate;
3126
3127	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3128	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3129	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3130	ggtt->vm.vma_ops.clear_pages = clear_pages;
3131
3132	if (unlikely(ggtt->do_idle_maps))
3133		dev_notice(dev_priv->drm.dev,
3134			   "Applying Ironlake quirks for intel_iommu\n");
3135
3136	return 0;
3137}
3138
3139static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
3140{
3141	struct drm_i915_private *i915 = gt->i915;
3142	int ret;
3143
3144	ggtt->vm.gt = gt;
3145	ggtt->vm.i915 = i915;
3146	ggtt->vm.dma = &i915->drm.pdev->dev;
3147
3148	if (INTEL_GEN(i915) <= 5)
3149		ret = i915_gmch_probe(ggtt);
3150	else if (INTEL_GEN(i915) < 8)
3151		ret = gen6_gmch_probe(ggtt);
3152	else
3153		ret = gen8_gmch_probe(ggtt);
3154	if (ret)
3155		return ret;
3156
3157	if ((ggtt->vm.total - 1) >> 32) {
3158		DRM_ERROR("We never expected a Global GTT with more than 32bits"
3159			  " of address space! Found %lldM!\n",
3160			  ggtt->vm.total >> 20);
3161		ggtt->vm.total = 1ULL << 32;
3162		ggtt->mappable_end =
3163			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
3164	}
3165
3166	if (ggtt->mappable_end > ggtt->vm.total) {
3167		DRM_ERROR("mappable aperture extends past end of GGTT,"
3168			  " aperture=%pa, total=%llx\n",
3169			  &ggtt->mappable_end, ggtt->vm.total);
3170		ggtt->mappable_end = ggtt->vm.total;
3171	}
3172
3173	/* GMADR is the PCI mmio aperture into the global GTT. */
3174	DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
3175	DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
3176	DRM_DEBUG_DRIVER("DSM size = %lluM\n",
3177			 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
3178
3179	return 0;
3180}
3181
3182/**
3183 * i915_ggtt_probe_hw - Probe GGTT hardware location
3184 * @i915: i915 device
3185 */
3186int i915_ggtt_probe_hw(struct drm_i915_private *i915)
3187{
3188	int ret;
3189
3190	ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
3191	if (ret)
3192		return ret;
3193
3194	if (intel_vtd_active())
3195		dev_info(i915->drm.dev, "VT-d active for gfx access\n");
3196
3197	return 0;
3198}
3199
3200static int ggtt_init_hw(struct i915_ggtt *ggtt)
3201{
3202	struct drm_i915_private *i915 = ggtt->vm.i915;
3203	int ret = 0;
3204
3205	mutex_lock(&i915->drm.struct_mutex);
3206
3207	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
3208
3209	ggtt->vm.is_ggtt = true;
3210
3211	/* Only VLV supports read-only GGTT mappings */
3212	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
3213
3214	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
3215		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
3216
3217	if (!io_mapping_init_wc(&ggtt->iomap,
3218				ggtt->gmadr.start,
3219				ggtt->mappable_end)) {
3220		ggtt->vm.cleanup(&ggtt->vm);
3221		ret = -EIO;
3222		goto out;
3223	}
3224
3225	ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
3226
3227	i915_ggtt_init_fences(ggtt);
3228
3229out:
3230	mutex_unlock(&i915->drm.struct_mutex);
3231
3232	return ret;
3233}
3234
3235/**
3236 * i915_ggtt_init_hw - Initialize GGTT hardware
3237 * @dev_priv: i915 device
3238 */
3239int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
3240{
3241	int ret;
3242
3243	stash_init(&dev_priv->mm.wc_stash);
3244
3245	/* Note that we use page colouring to enforce a guard page at the
3246	 * end of the address space. This is required as the CS may prefetch
3247	 * beyond the end of the batch buffer, across the page boundary,
3248	 * and beyond the end of the GTT if we do not provide a guard.
3249	 */
3250	ret = ggtt_init_hw(&dev_priv->ggtt);
3251	if (ret)
3252		return ret;
3253
3254	/*
3255	 * Initialise stolen early so that we may reserve preallocated
3256	 * objects for the BIOS to KMS transition.
3257	 */
3258	ret = i915_gem_init_stolen(dev_priv);
3259	if (ret)
3260		goto out_gtt_cleanup;
3261
3262	return 0;
3263
3264out_gtt_cleanup:
3265	dev_priv->ggtt.vm.cleanup(&dev_priv->ggtt.vm);
3266	return ret;
3267}
3268
3269int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
3270{
3271	if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
3272		return -EIO;
3273
3274	return 0;
3275}
3276
3277void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
3278{
3279	GEM_BUG_ON(ggtt->invalidate != gen6_ggtt_invalidate);
3280
3281	ggtt->invalidate = guc_ggtt_invalidate;
3282
3283	ggtt->invalidate(ggtt);
3284}
3285
3286void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
3287{
3288	/* XXX Temporary pardon for error unload */
3289	if (ggtt->invalidate == gen6_ggtt_invalidate)
3290		return;
3291
3292	/* We should only be called after i915_ggtt_enable_guc() */
3293	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
3294
3295	ggtt->invalidate = gen6_ggtt_invalidate;
3296
3297	ggtt->invalidate(ggtt);
3298}
3299
3300static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
3301{
3302	struct i915_vma *vma, *vn;
3303	bool flush = false;
3304
3305	intel_gt_check_and_clear_faults(ggtt->vm.gt);
3306
3307	mutex_lock(&ggtt->vm.mutex);
3308
3309	/* First fill our portion of the GTT with scratch pages */
3310	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
3311	ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
3312
3313	/* clflush objects bound into the GGTT and rebind them. */
3314	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
3315		struct drm_i915_gem_object *obj = vma->obj;
3316
3317		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
3318			continue;
3319
3320		mutex_unlock(&ggtt->vm.mutex);
3321
3322		if (!i915_vma_unbind(vma))
3323			goto lock;
3324
3325		WARN_ON(i915_vma_bind(vma,
3326				      obj ? obj->cache_level : 0,
3327				      PIN_UPDATE));
3328		if (obj) { /* only used during resume => exclusive access */
3329			flush |= fetch_and_zero(&obj->write_domain);
3330			obj->read_domains |= I915_GEM_DOMAIN_GTT;
3331		}
3332
3333lock:
3334		mutex_lock(&ggtt->vm.mutex);
3335	}
3336
3337	ggtt->vm.closed = false;
3338	ggtt->invalidate(ggtt);
3339
3340	mutex_unlock(&ggtt->vm.mutex);
3341
3342	if (flush)
3343		wbinvd_on_all_cpus();
3344}
3345
3346void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
3347{
3348	ggtt_restore_mappings(&i915->ggtt);
3349
3350	if (INTEL_GEN(i915) >= 8)
3351		setup_private_pat(i915);
3352}
3353
3354static struct scatterlist *
3355rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
3356	     unsigned int width, unsigned int height,
3357	     unsigned int stride,
3358	     struct sg_table *st, struct scatterlist *sg)
3359{
3360	unsigned int column, row;
3361	unsigned int src_idx;
3362
3363	for (column = 0; column < width; column++) {
3364		src_idx = stride * (height - 1) + column + offset;
3365		for (row = 0; row < height; row++) {
3366			st->nents++;
3367			/* We don't need the pages, but need to initialize
3368			 * the entries so the sg list can be happily traversed.
3369			 * The only things we need are DMA addresses.
3370			 */
3371			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
3372			sg_dma_address(sg) =
3373				i915_gem_object_get_dma_address(obj, src_idx);
3374			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
3375			sg = sg_next(sg);
3376			src_idx -= stride;
3377		}
3378	}
3379
3380	return sg;
3381}
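
/*
 * Worked example (illustrative): with width == height == stride == 2 and
 * offset == 0, each column above starts at the bottom row and walks upwards,
 * so the source pages are emitted in the order 2, 0, 3, 1, i.e. the sg list
 * describes a rotated view of the original 2x2 page layout.
 */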
3382
3383static noinline struct sg_table *
3384intel_rotate_pages(struct intel_rotation_info *rot_info,
3385		   struct drm_i915_gem_object *obj)
3386{
3387	unsigned int size = intel_rotation_info_size(rot_info);
3388	struct sg_table *st;
3389	struct scatterlist *sg;
3390	int ret = -ENOMEM;
3391	int i;
3392
3393	/* Allocate target SG list. */
3394	st = kmalloc(sizeof(*st), GFP_KERNEL);
3395	if (!st)
3396		goto err_st_alloc;
3397
3398	ret = sg_alloc_table(st, size, GFP_KERNEL);
3399	if (ret)
3400		goto err_sg_alloc;
3401
3402	st->nents = 0;
3403	sg = st->sgl;
3404
3405	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3406		sg = rotate_pages(obj, rot_info->plane[i].offset,
3407				  rot_info->plane[i].width, rot_info->plane[i].height,
3408				  rot_info->plane[i].stride, st, sg);
3409	}
3410
3411	return st;
3412
3413err_sg_alloc:
3414	kfree(st);
3415err_st_alloc:
3416
3417	DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3418			 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3419
3420	return ERR_PTR(ret);
3421}
3422
3423static struct scatterlist *
3424remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
3425	    unsigned int width, unsigned int height,
3426	    unsigned int stride,
3427	    struct sg_table *st, struct scatterlist *sg)
3428{
3429	unsigned int row;
3430
3431	for (row = 0; row < height; row++) {
3432		unsigned int left = width * I915_GTT_PAGE_SIZE;
3433
3434		while (left) {
3435			dma_addr_t addr;
3436			unsigned int length;
3437
3438			/* We don't need the pages, but need to initialize
3439			 * the entries so the sg list can be happily traversed.
3440			 * The only things we need are DMA addresses.
3441			 */
3442
3443			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
3444
3445			length = min(left, length);
3446
3447			st->nents++;
3448
3449			sg_set_page(sg, NULL, length, 0);
3450			sg_dma_address(sg) = addr;
3451			sg_dma_len(sg) = length;
3452			sg = sg_next(sg);
3453
3454			offset += length / I915_GTT_PAGE_SIZE;
3455			left -= length;
3456		}
3457
3458		offset += stride - width;
3459	}
3460
3461	return sg;
3462}
3463
3464static noinline struct sg_table *
3465intel_remap_pages(struct intel_remapped_info *rem_info,
3466		  struct drm_i915_gem_object *obj)
3467{
3468	unsigned int size = intel_remapped_info_size(rem_info);
3469	struct sg_table *st;
3470	struct scatterlist *sg;
3471	int ret = -ENOMEM;
3472	int i;
3473
3474	/* Allocate target SG list. */
3475	st = kmalloc(sizeof(*st), GFP_KERNEL);
3476	if (!st)
3477		goto err_st_alloc;
3478
3479	ret = sg_alloc_table(st, size, GFP_KERNEL);
3480	if (ret)
3481		goto err_sg_alloc;
3482
3483	st->nents = 0;
3484	sg = st->sgl;
3485
3486	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
3487		sg = remap_pages(obj, rem_info->plane[i].offset,
3488				 rem_info->plane[i].width, rem_info->plane[i].height,
3489				 rem_info->plane[i].stride, st, sg);
3490	}
3491
3492	i915_sg_trim(st);
3493
3494	return st;
3495
3496err_sg_alloc:
3497	kfree(st);
3498err_st_alloc:
3499
3500	DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3501			 obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
3502
3503	return ERR_PTR(ret);
3504}
3505
3506static noinline struct sg_table *
3507intel_partial_pages(const struct i915_ggtt_view *view,
3508		    struct drm_i915_gem_object *obj)
3509{
3510	struct sg_table *st;
3511	struct scatterlist *sg, *iter;
3512	unsigned int count = view->partial.size;
3513	unsigned int offset;
3514	int ret = -ENOMEM;
3515
3516	st = kmalloc(sizeof(*st), GFP_KERNEL);
3517	if (!st)
3518		goto err_st_alloc;
3519
3520	ret = sg_alloc_table(st, count, GFP_KERNEL);
3521	if (ret)
3522		goto err_sg_alloc;
3523
3524	iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
3525	GEM_BUG_ON(!iter);
3526
3527	sg = st->sgl;
3528	st->nents = 0;
3529	do {
3530		unsigned int len;
3531
3532		len = min(iter->length - (offset << PAGE_SHIFT),
3533			  count << PAGE_SHIFT);
3534		sg_set_page(sg, NULL, len, 0);
3535		sg_dma_address(sg) =
3536			sg_dma_address(iter) + (offset << PAGE_SHIFT);
3537		sg_dma_len(sg) = len;
3538
3539		st->nents++;
3540		count -= len >> PAGE_SHIFT;
3541		if (count == 0) {
3542			sg_mark_end(sg);
3543			i915_sg_trim(st); /* Drop any unused tail entries. */
3544
3545			return st;
3546		}
3547
3548		sg = __sg_next(sg);
3549		iter = __sg_next(iter);
3550		offset = 0;
3551	} while (1);
3552
3553err_sg_alloc:
3554	kfree(st);
3555err_st_alloc:
3556	return ERR_PTR(ret);
3557}
3558
3559static int
3560i915_get_ggtt_vma_pages(struct i915_vma *vma)
3561{
3562	int ret;
3563
3564	/* The vma->pages are only valid within the lifespan of the borrowed
3565	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3566	 * must be the vma->pages. A simple rule is that vma->pages must only
3567	 * be accessed when the obj->mm.pages are pinned.
3568	 */
3569	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3570
3571	switch (vma->ggtt_view.type) {
3572	default:
3573		GEM_BUG_ON(vma->ggtt_view.type);
3574		/* fall through */
3575	case I915_GGTT_VIEW_NORMAL:
3576		vma->pages = vma->obj->mm.pages;
3577		return 0;
3578
3579	case I915_GGTT_VIEW_ROTATED:
3580		vma->pages =
3581			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3582		break;
3583
3584	case I915_GGTT_VIEW_REMAPPED:
3585		vma->pages =
3586			intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
3587		break;
3588
3589	case I915_GGTT_VIEW_PARTIAL:
3590		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
3591		break;
3592	}
3593
3594	ret = 0;
3595	if (IS_ERR(vma->pages)) {
3596		ret = PTR_ERR(vma->pages);
3597		vma->pages = NULL;
3598		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3599			  vma->ggtt_view.type, ret);
3600	}
3601	return ret;
3602}
3603
3604/**
3605 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
3606 * @vm: the &struct i915_address_space
3607 * @node: the &struct drm_mm_node (typically i915_vma.node)
3608 * @size: how much space to allocate inside the GTT,
3609 *        must be #I915_GTT_PAGE_SIZE aligned
3610 * @offset: where to insert inside the GTT,
3611 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3612 *          (@offset + @size) must fit within the address space
3613 * @color: color to apply to node, if this node is not from a VMA,
3614 *         color must be #I915_COLOR_UNEVICTABLE
3615 * @flags: control search and eviction behaviour
3616 *
3617 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3618 * the address space (using @size and @color). If the @node does not fit, it
3619 * tries to evict any overlapping nodes from the GTT, including any
3620 * neighbouring nodes if the colors do not match (to ensure guard pages between
3621 * differing domains). See i915_gem_evict_for_node() for the gory details
3622 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3623 * evicting active overlapping objects, and any overlapping node that is pinned
3624 * or marked as unevictable will also result in failure.
3625 *
3626 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3627 * asked to wait for eviction and interrupted.
3628 */
3629int i915_gem_gtt_reserve(struct i915_address_space *vm,
3630			 struct drm_mm_node *node,
3631			 u64 size, u64 offset, unsigned long color,
3632			 unsigned int flags)
3633{
3634	int err;
3635
3636	GEM_BUG_ON(!size);
3637	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3638	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3639	GEM_BUG_ON(range_overflows(offset, size, vm->total));
3640	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
3641	GEM_BUG_ON(drm_mm_node_allocated(node));
3642
3643	node->size = size;
3644	node->start = offset;
3645	node->color = color;
3646
3647	err = drm_mm_reserve_node(&vm->mm, node);
3648	if (err != -ENOSPC)
3649		return err;
3650
3651	if (flags & PIN_NOEVICT)
3652		return -ENOSPC;
3653
3654	err = i915_gem_evict_for_node(vm, node, flags);
3655	if (err == 0)
3656		err = drm_mm_reserve_node(&vm->mm, node);
3657
3658	return err;
3659}
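
/*
 * Illustrative sketch only: ggtt_reserve_guc_top() above is a real caller of
 * this helper; a minimal fixed-offset reservation (node, size and offset are
 * hypothetical here) follows the same shape:
 *
 *	err = i915_gem_gtt_reserve(&ggtt->vm, &node, size, offset,
 *				   I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
 *	if (err)
 *		return err;
 *	...
 *	drm_mm_remove_node(&node);
 */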
3660
3661static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3662{
3663	u64 range, addr;
3664
3665	GEM_BUG_ON(range_overflows(start, len, end));
3666	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3667
3668	range = round_down(end - len, align) - round_up(start, align);
3669	if (range) {
3670		if (sizeof(unsigned long) == sizeof(u64)) {
3671			addr = get_random_long();
3672		} else {
3673			addr = get_random_int();
3674			if (range > U32_MAX) {
3675				addr <<= 32;
3676				addr |= get_random_int();
3677			}
3678		}
3679		div64_u64_rem(addr, range, &addr);
3680		start += addr;
3681	}
3682
3683	return round_up(start, align);
3684}
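
/*
 * Worked example (illustrative): for start == 0, end == 1G, len == 4K and
 * align == 4K, range == round_down(1G - 4K, 4K) - 0. The random value is
 * reduced modulo that range and the sum is rounded up to the next 4K
 * boundary, so every returned offset is aligned and still leaves room for
 * len bytes without crossing end.
 */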
3685
3686/**
3687 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
3688 * @vm: the &struct i915_address_space
3689 * @node: the &struct drm_mm_node (typically i915_vma.node)
3690 * @size: how much space to allocate inside the GTT,
3691 *        must be #I915_GTT_PAGE_SIZE aligned
3692 * @alignment: required alignment of starting offset, may be 0 but
3693 *             if specified, this must be a power-of-two and at least
3694 *             #I915_GTT_MIN_ALIGNMENT
3695 * @color: color to apply to node
3696 * @start: start of any range restriction inside GTT (0 for all),
3697 *         must be #I915_GTT_PAGE_SIZE aligned
3698 * @end: end of any range restriction inside GTT (U64_MAX for all),
3699 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3700 * @flags: control search and eviction behaviour
3701 *
3702 * i915_gem_gtt_insert() first searches for an available hole into which
3703 * it can insert the node. The hole address is aligned to @alignment and
3704 * its @size must then fit entirely within the [@start, @end] bounds. The
3705 * nodes on either side of the hole must match @color, or else a guard page
3706 * will be inserted between the two nodes (or the node evicted). If no
3707 * suitable hole is found, first a victim is randomly selected and tested
3708 * for eviction, and failing that the LRU list of objects within the GTT
3709 * is scanned to find the first set of replacement nodes to create the hole.
3710 * Those old overlapping nodes are evicted from the GTT (and so must be
3711 * rebound before any future use). Any node that is currently pinned cannot
3712 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
3713 * active and #PIN_NONBLOCK is specified, that node is also skipped when
3714 * searching for an eviction candidate. See i915_gem_evict_something() for
3715 * the gory details on the eviction algorithm.
3716 *
3717 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3718 * asked to wait for eviction and interrupted.
3719 */
3720int i915_gem_gtt_insert(struct i915_address_space *vm,
3721			struct drm_mm_node *node,
3722			u64 size, u64 alignment, unsigned long color,
3723			u64 start, u64 end, unsigned int flags)
3724{
3725	enum drm_mm_insert_mode mode;
3726	u64 offset;
3727	int err;
3728
3729	lockdep_assert_held(&vm->i915->drm.struct_mutex);
3730	GEM_BUG_ON(!size);
3731	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3732	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
3733	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
3734	GEM_BUG_ON(start >= end);
3735	GEM_BUG_ON(start > 0  && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
3736	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
3737	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
3738	GEM_BUG_ON(drm_mm_node_allocated(node));
3739
3740	if (unlikely(range_overflows(start, size, end)))
3741		return -ENOSPC;
3742
3743	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
3744		return -ENOSPC;
3745
3746	mode = DRM_MM_INSERT_BEST;
3747	if (flags & PIN_HIGH)
3748		mode = DRM_MM_INSERT_HIGHEST;
3749	if (flags & PIN_MAPPABLE)
3750		mode = DRM_MM_INSERT_LOW;
3751
3752	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3753	 * so we know that we always have a minimum alignment of 4096.
3754	 * The drm_mm range manager is optimised to return results
3755	 * with zero alignment, so where possible use the optimal
3756	 * path.
3757	 */
3758	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
3759	if (alignment <= I915_GTT_MIN_ALIGNMENT)
3760		alignment = 0;
3761
3762	err = drm_mm_insert_node_in_range(&vm->mm, node,
3763					  size, alignment, color,
3764					  start, end, mode);
3765	if (err != -ENOSPC)
3766		return err;
3767
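	/*
	 * DRM_MM_INSERT_HIGHEST implies DRM_MM_INSERT_ONCE and so only probes
	 * the single topmost hole. If that one hole was unsuitable, fall back
	 * to an exhaustive best-fit search before considering eviction.
	 */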
3768	if (mode & DRM_MM_INSERT_ONCE) {
3769		err = drm_mm_insert_node_in_range(&vm->mm, node,
3770						  size, alignment, color,
3771						  start, end,
3772						  DRM_MM_INSERT_BEST);
3773		if (err != -ENOSPC)
3774			return err;
3775	}
3776
3777	if (flags & PIN_NOEVICT)
3778		return -ENOSPC;
3779
3780	/*
3781	 * No free space, pick a slot at random.
3782	 *
3783	 * There is a pathological case here using a GTT shared between
3784	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
3785	 *
3786	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
3787	 *         (64k objects)             (448k objects)
3788	 *
3789	 * Now imagine that the eviction LRU is ordered top-down (just because
3790	 * pathology meets real life), and that we need to evict an object to
3791	 * make room inside the aperture. The eviction scan then has to walk
3792	 * the 448k list before it finds one within range. And now imagine that
3793	 * it has to search for a new hole between every byte inside the memcpy,
3794	 * for several simultaneous clients.
3795	 *
3796	 * On a full-ppgtt system, if we have run out of available space, there
3797	 * will be lots and lots of objects in the eviction list! Again,
3798	 * searching that LRU list may be slow if we are also applying any
3799	 * range restrictions (e.g. restriction to low 4GiB) and so, for
3800 * simplicity and similarity between the different GTTs, try the single
3801	 * random replacement first.
3802	 */
3803	offset = random_offset(start, end,
3804			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
3805	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
3806	if (err != -ENOSPC)
3807		return err;
3808
3809	if (flags & PIN_NOSEARCH)
3810		return -ENOSPC;
3811
3812	/* Randomly selected placement is pinned, do a search */
3813	err = i915_gem_evict_something(vm, size, alignment, color,
3814				       start, end, flags);
3815	if (err)
3816		return err;
3817
3818	return drm_mm_insert_node_in_range(&vm->mm, node,
3819					   size, alignment, color,
3820					   start, end, DRM_MM_INSERT_EVICT);
3821}
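
/*
 * Illustrative sketch only (not part of the driver): a caller letting
 * i915_gem_gtt_insert() find (and, if necessary, evict space for) a slot
 * within the low 4GiB of the GTT, preferring high addresses. The i915/node
 * locals and the 2M size are hypothetical; as the lockdep assert above
 * shows, this is called with struct_mutex held in this kernel.
 *
 *	struct i915_ggtt *ggtt = &i915->ggtt;
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_insert(&ggtt->vm, &node, SZ_2M, 0, 0,
 *				  0, BIT_ULL(32), PIN_HIGH);
 *	if (err)
 *		return err;
 */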
3822
3823#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3824#include "selftests/mock_gtt.c"
3825#include "selftests/i915_gem_gtt.c"
3826#endif
  25
 
 
 
 
 
  26#include <linux/seq_file.h>
  27#include <drm/drmP.h>
 
 
 
 
  28#include <drm/i915_drm.h>
 
 
 
 
  29#include "i915_drv.h"
 
  30#include "i915_trace.h"
  31#include "intel_drv.h"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  32
  33static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv);
 
  34
  35bool intel_enable_ppgtt(struct drm_device *dev, bool full)
  36{
  37	if (i915.enable_ppgtt == 0)
  38		return false;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  39
  40	if (i915.enable_ppgtt == 1 && full)
  41		return false;
  42
  43	return true;
 
 
  44}
  45
  46static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
  47{
  48	if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
  49		return 0;
 
 
 
  50
  51	if (enable_ppgtt == 1)
  52		return 1;
  53
  54	if (enable_ppgtt == 2 && HAS_PPGTT(dev))
  55		return 2;
 
  56
  57#ifdef CONFIG_INTEL_IOMMU
  58	/* Disable ppgtt on SNB if VT-d is on. */
  59	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
  60		DRM_INFO("Disabling PPGTT because VT-d is on\n");
  61		return 0;
  62	}
  63#endif
  64
  65	return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
  66}
  67
  68#define GEN6_PPGTT_PD_ENTRIES 512
  69#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
  70typedef uint64_t gen8_gtt_pte_t;
  71typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
  72
  73/* PPGTT stuff */
  74#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
  75#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
  76
  77#define GEN6_PDE_VALID			(1 << 0)
  78/* gen6+ has bit 11-4 for physical addr bit 39-32 */
  79#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
  80
  81#define GEN6_PTE_VALID			(1 << 0)
  82#define GEN6_PTE_UNCACHED		(1 << 1)
  83#define HSW_PTE_UNCACHED		(0)
  84#define GEN6_PTE_CACHE_LLC		(2 << 1)
  85#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)
  86#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
  87#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
  88
  89/* Cacheability Control is a 4-bit value. The low three bits are stored in *
  90 * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
  91 */
  92#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
  93					 (((bits) & 0x8) << (11 - 3)))
  94#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
  95#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
  96#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
  97#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
  98#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
  99#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
 100
 101#define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
 102#define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
 103
 104/* GEN8 legacy style addressis defined as a 3 level page table:
 105 * 31:30 | 29:21 | 20:12 |  11:0
 106 * PDPE  |  PDE  |  PTE  | offset
 107 * The difference as compared to normal x86 3 level page table is the PDPEs are
 108 * programmed via register.
 109 */
 110#define GEN8_PDPE_SHIFT			30
 111#define GEN8_PDPE_MASK			0x3
 112#define GEN8_PDE_SHIFT			21
 113#define GEN8_PDE_MASK			0x1ff
 114#define GEN8_PTE_SHIFT			12
 115#define GEN8_PTE_MASK			0x1ff
 116
 117#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
 118#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
 119#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
 120#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */
 121
 122static void ppgtt_bind_vma(struct i915_vma *vma,
 123			   enum i915_cache_level cache_level,
 124			   u32 flags);
 125static void ppgtt_unbind_vma(struct i915_vma *vma);
 126static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
 127
 128static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
 129					     enum i915_cache_level level,
 130					     bool valid)
 131{
 132	gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
 133	pte |= addr;
 134	if (level != I915_CACHE_NONE)
 135		pte |= PPAT_CACHED_INDEX;
 136	else
 137		pte |= PPAT_UNCACHED_INDEX;
 138	return pte;
 139}
 140
 141static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
 142					     dma_addr_t addr,
 143					     enum i915_cache_level level)
 144{
 145	gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
 146	pde |= addr;
 147	if (level != I915_CACHE_NONE)
 148		pde |= PPAT_CACHED_PDE_INDEX;
 149	else
 150		pde |= PPAT_UNCACHED_INDEX;
 151	return pde;
 152}
 153
 154static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
 155				     enum i915_cache_level level,
 156				     bool valid)
 157{
 158	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 159	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 160
 161	switch (level) {
 162	case I915_CACHE_L3_LLC:
 163	case I915_CACHE_LLC:
 164		pte |= GEN6_PTE_CACHE_LLC;
 165		break;
 166	case I915_CACHE_NONE:
 167		pte |= GEN6_PTE_UNCACHED;
 168		break;
 169	default:
 170		WARN_ON(1);
 171	}
 172
 173	return pte;
 174}
 175
 176static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
 177				     enum i915_cache_level level,
 178				     bool valid)
 179{
 180	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 181	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 182
 183	switch (level) {
 184	case I915_CACHE_L3_LLC:
 185		pte |= GEN7_PTE_CACHE_L3_LLC;
 186		break;
 187	case I915_CACHE_LLC:
 188		pte |= GEN6_PTE_CACHE_LLC;
 189		break;
 190	case I915_CACHE_NONE:
 191		pte |= GEN6_PTE_UNCACHED;
 192		break;
 193	default:
 194		WARN_ON(1);
 195	}
 196
 197	return pte;
 198}
 199
 200#define BYT_PTE_WRITEABLE		(1 << 1)
 201#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
 202
 203static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
 204				     enum i915_cache_level level,
 205				     bool valid)
 206{
 207	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 208	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 209
 210	/* Mark the page as writeable.  Other platforms don't have a
 211	 * setting for read-only/writable, so this matches that behavior.
 212	 */
 213	pte |= BYT_PTE_WRITEABLE;
 214
 215	if (level != I915_CACHE_NONE)
 216		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
 217
 218	return pte;
 219}
 220
 221static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
 222				     enum i915_cache_level level,
 223				     bool valid)
 224{
 225	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 226	pte |= HSW_PTE_ADDR_ENCODE(addr);
 227
 228	if (level != I915_CACHE_NONE)
 229		pte |= HSW_WB_LLC_AGE3;
 230
 231	return pte;
 232}
 233
 234static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 235				      enum i915_cache_level level,
 236				      bool valid)
 237{
 238	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 239	pte |= HSW_PTE_ADDR_ENCODE(addr);
 240
 241	switch (level) {
 242	case I915_CACHE_NONE:
 243		break;
 244	case I915_CACHE_WT:
 245		pte |= HSW_WT_ELLC_LLC_AGE3;
 246		break;
 247	default:
 248		pte |= HSW_WB_ELLC_LLC_AGE3;
 249		break;
 250	}
 251
 252	return pte;
 253}
 254
 255/* Broadwell Page Directory Pointer Descriptors */
 256static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
 257			   uint64_t val, bool synchronous)
 258{
 259	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 260	int ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 261
 262	BUG_ON(entry >= 4);
 
 
 263
 264	if (synchronous) {
 265		I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
 266		I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
 267		return 0;
 268	}
 269
 270	ret = intel_ring_begin(ring, 6);
 271	if (ret)
 272		return ret;
 
 
 273
 274	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 275	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
 276	intel_ring_emit(ring, (u32)(val >> 32));
 277	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 278	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
 279	intel_ring_emit(ring, (u32)(val));
 280	intel_ring_advance(ring);
 281
 282	return 0;
 283}
 284
 285static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
 286			  struct intel_ring_buffer *ring,
 287			  bool synchronous)
 288{
 289	int i, ret;
 
 
 
 
 290
 291	/* bit of a hack to find the actual last used pd */
 292	int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 293
 294	for (i = used_pd - 1; i >= 0; i--) {
 295		dma_addr_t addr = ppgtt->pd_dma_addr[i];
 296		ret = gen8_write_pdp(ring, i, addr, synchronous);
 297		if (ret)
 298			return ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 299	}
 300
 301	return 0;
 302}
 303
 304static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 305				   uint64_t start,
 306				   uint64_t length,
 307				   bool use_scratch)
 308{
 309	struct i915_hw_ppgtt *ppgtt =
 310		container_of(vm, struct i915_hw_ppgtt, base);
 311	gen8_gtt_pte_t *pt_vaddr, scratch_pte;
 312	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
 313	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
 314	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
 315	unsigned num_entries = length >> PAGE_SHIFT;
 316	unsigned last_pte, i;
 317
 318	scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
 319				      I915_CACHE_LLC, use_scratch);
 320
 321	while (num_entries) {
 322		struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 323
 324		last_pte = pte + num_entries;
 325		if (last_pte > GEN8_PTES_PER_PAGE)
 326			last_pte = GEN8_PTES_PER_PAGE;
 327
 328		pt_vaddr = kmap_atomic(page_table);
 
 329
 330		for (i = pte; i < last_pte; i++) {
 331			pt_vaddr[i] = scratch_pte;
 332			num_entries--;
 333		}
 
 
 
 334
 335		kunmap_atomic(pt_vaddr);
 
 
 336
 337		pte = 0;
 338		if (++pde == GEN8_PDES_PER_PAGE) {
 339			pdpe++;
 340			pde = 0;
 341		}
 342	}
 
 343}
 344
 345static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 346				      struct sg_table *pages,
 347				      uint64_t start,
 348				      enum i915_cache_level cache_level)
 349{
 350	struct i915_hw_ppgtt *ppgtt =
 351		container_of(vm, struct i915_hw_ppgtt, base);
 352	gen8_gtt_pte_t *pt_vaddr;
 353	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
 354	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
 355	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
 356	struct sg_page_iter sg_iter;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 357
 358	pt_vaddr = NULL;
 
 
 359
 360	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 361		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
 362			break;
 363
 364		if (pt_vaddr == NULL)
 365			pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
 
 366
 367		pt_vaddr[pte] =
 368			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
 369					cache_level, true);
 370		if (++pte == GEN8_PTES_PER_PAGE) {
 371			kunmap_atomic(pt_vaddr);
 372			pt_vaddr = NULL;
 373			if (++pde == GEN8_PDES_PER_PAGE) {
 374				pdpe++;
 375				pde = 0;
 376			}
 377			pte = 0;
 378		}
 
 
 
 
 379	}
 380	if (pt_vaddr)
 381		kunmap_atomic(pt_vaddr);
 
 
 
 
 
 
 382}
 383
 384static void gen8_free_page_tables(struct page **pt_pages)
 
 385{
 386	int i;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 387
 388	if (pt_pages == NULL)
 389		return;
 
 
 390
 391	for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
 392		if (pt_pages[i])
 393			__free_pages(pt_pages[i], 0);
 394}
 395
 396static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
 397{
 398	int i;
 399
 400	for (i = 0; i < ppgtt->num_pd_pages; i++) {
 401		gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
 402		kfree(ppgtt->gen8_pt_pages[i]);
 403		kfree(ppgtt->gen8_pt_dma_addr[i]);
 
 
 
 404	}
 405
 406	__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
 407}
 408
 409static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
 410{
 411	struct pci_dev *hwdev = ppgtt->base.dev->pdev;
 412	int i, j;
 413
 414	for (i = 0; i < ppgtt->num_pd_pages; i++) {
 415		/* TODO: In the future we'll support sparse mappings, so this
 416		 * will have to change. */
 417		if (!ppgtt->pd_dma_addr[i])
 418			continue;
 419
 420		pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
 421			       PCI_DMA_BIDIRECTIONAL);
 
 
 422
 423		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
 424			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
 425			if (addr)
 426				pci_unmap_page(hwdev, addr, PAGE_SIZE,
 427					       PCI_DMA_BIDIRECTIONAL);
 428		}
 429	}
 430}
 431
 432static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 433{
 434	struct i915_hw_ppgtt *ppgtt =
 435		container_of(vm, struct i915_hw_ppgtt, base);
 436
 437	list_del(&vm->global_link);
 438	drm_mm_takedown(&vm->mm);
 
 439
 440	gen8_ppgtt_unmap_pages(ppgtt);
 441	gen8_ppgtt_free(ppgtt);
 442}
 443
 444static struct page **__gen8_alloc_page_tables(void)
 445{
 446	struct page **pt_pages;
 447	int i;
 448
 449	pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
 450	if (!pt_pages)
 451		return ERR_PTR(-ENOMEM);
 452
 453	for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
 454		pt_pages[i] = alloc_page(GFP_KERNEL);
 455		if (!pt_pages[i])
 456			goto bail;
 457	}
 458
 459	return pt_pages;
 
 460
 461bail:
 462	gen8_free_page_tables(pt_pages);
 463	kfree(pt_pages);
 464	return ERR_PTR(-ENOMEM);
 465}
 466
 467static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
 468					   const int max_pdp)
 
 
 
 
 469{
 470	struct page **pt_pages[GEN8_LEGACY_PDPS];
 471	int i, ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 472
 473	for (i = 0; i < max_pdp; i++) {
 474		pt_pages[i] = __gen8_alloc_page_tables();
 475		if (IS_ERR(pt_pages[i])) {
 476			ret = PTR_ERR(pt_pages[i]);
 477			goto unwind_out;
 478		}
 479	}
 
 
 
 
 480
 481	/* NB: Avoid touching gen8_pt_pages until last to keep the allocation,
 482	 * "atomic" - for cleanup purposes.
 483	 */
 484	for (i = 0; i < max_pdp; i++)
 485		ppgtt->gen8_pt_pages[i] = pt_pages[i];
 
 
 486
 487	return 0;
 
 488
 489unwind_out:
 490	while (i--) {
 491		gen8_free_page_tables(pt_pages[i]);
 492		kfree(pt_pages[i]);
 493	}
 
 
 
 
 494
 495	return ret;
 
 
 
 
 
 
 
 
 496}
 497
 498static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
 499{
 
 
 500	int i;
 501
 502	for (i = 0; i < ppgtt->num_pd_pages; i++) {
 503		ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
 504						     sizeof(dma_addr_t),
 505						     GFP_KERNEL);
 506		if (!ppgtt->gen8_pt_dma_addr[i])
 507			return -ENOMEM;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 508	}
 509
 510	return 0;
 
 
 
 511}
 512
 513static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
 514						const int max_pdp)
 
 
 
 
 
 
 
 
 
 515{
 516	ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
 517	if (!ppgtt->pd_pages)
 518		return -ENOMEM;
 519
 520	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
 521	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
 522
 523	return 0;
 
 
 
 
 524}
 525
 526static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
 527			    const int max_pdp)
 528{
 529	int ret;
 
 
 
 
 530
 531	ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
 532	if (ret)
 533		return ret;
 
 
 
 
 
 534
 535	ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
 536	if (ret) {
 537		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
 538		return ret;
 539	}
 540
 541	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
 
 
 
 542
 543	ret = gen8_ppgtt_allocate_dma(ppgtt);
 544	if (ret)
 545		gen8_ppgtt_free(ppgtt);
 
 
 546
 547	return ret;
 
 
 
 548}
 549
 550static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
 551					     const int pd)
 
 552{
 553	dma_addr_t pd_addr;
 554	int ret;
 
 
 
 
 
 
 
 
 555
 556	pd_addr = pci_map_page(ppgtt->base.dev->pdev,
 557			       &ppgtt->pd_pages[pd], 0,
 558			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 559
 560	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
 561	if (ret)
 562		return ret;
 563
 564	ppgtt->pd_dma_addr[pd] = pd_addr;
 
 565
 566	return 0;
 
 567}
 568
 569static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
 570					const int pd,
 571					const int pt)
 572{
 573	dma_addr_t pt_addr;
 574	struct page *p;
 575	int ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 576
 577	p = ppgtt->gen8_pt_pages[pd][pt];
 578	pt_addr = pci_map_page(ppgtt->base.dev->pdev,
 579			       p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 580	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
 581	if (ret)
 582		return ret;
 583
 584	ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
 
 
 585
 586	return 0;
 587}
 588
 589/**
 590 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 591 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 592 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 593 * space.
 594 *
 595 * FIXME: split allocation into smaller pieces. For now we only ever do this
 596 * once, but with full PPGTT, the multiple contiguous allocations will be bad.
 597 * TODO: Do something with the size parameter
 598 */
 599static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 600{
 601	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
 602	const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
 603	int i, j, ret;
 604
 605	if (size % (1<<30))
 606		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
 
 607
 608	/* 1. Do all our allocations for page directories and page tables. */
 609	ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
 610	if (ret)
 611		return ret;
 
 
 
 
 
 
 
 
 
 
 612
 613	/*
 614	 * 2. Create DMA mappings for the page directories and page tables.
 615	 */
 616	for (i = 0; i < max_pdp; i++) {
 617		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
 618		if (ret)
 619			goto bail;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 620
 621		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
 622			ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
 623			if (ret)
 624				goto bail;
 
 625		}
 626	}
 627
 628	/*
 629	 * 3. Map all the page directory entires to point to the page tables
 630	 * we've allocated.
 631	 *
 632	 * For now, the PPGTT helper functions all require that the PDEs are
 633	 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
 634	 * will never need to touch the PDEs again.
 635	 */
 636	for (i = 0; i < max_pdp; i++) {
 637		gen8_ppgtt_pde_t *pd_vaddr;
 638		pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
 639		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
 640			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
 641			pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
 642						      I915_CACHE_LLC);
 643		}
 644		kunmap_atomic(pd_vaddr);
 645	}
 646
 647	ppgtt->enable = gen8_ppgtt_enable;
 648	ppgtt->switch_mm = gen8_mm_switch;
 649	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
 650	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
 651	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
 652	ppgtt->base.start = 0;
 653	ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;
 654
 655	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
 656
 657	DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
 658			 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
 659	DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
 660			 ppgtt->num_pd_entries,
 661			 (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
 662	return 0;
 663
 664bail:
 665	gen8_ppgtt_unmap_pages(ppgtt);
 666	gen8_ppgtt_free(ppgtt);
 667	return ret;
 668}
 669
 670static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 
 671{
 672	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
 673	struct i915_address_space *vm = &ppgtt->base;
 674	gen6_gtt_pte_t __iomem *pd_addr;
 675	gen6_gtt_pte_t scratch_pte;
 676	uint32_t pd_entry;
 677	int pte, pde;
 678
 679	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
 680
 681	pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
 682		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
 683
 684	seq_printf(m, "  VM %p (pd_offset %x-%x):\n", vm,
 685		   ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
 686	for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
 687		u32 expected;
 688		gen6_gtt_pte_t *pt_vaddr;
 689		dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
 690		pd_entry = readl(pd_addr + pde);
 691		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
 692
 693		if (pd_entry != expected)
 694			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
 695				   pde,
 696				   pd_entry,
 697				   expected);
 698		seq_printf(m, "\tPDE: %x\n", pd_entry);
 699
 700		pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
 701		for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
 702			unsigned long va =
 703				(pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
 704				(pte * PAGE_SIZE);
 705			int i;
 706			bool found = false;
 707			for (i = 0; i < 4; i++)
 708				if (pt_vaddr[pte + i] != scratch_pte)
 709					found = true;
 710			if (!found)
 711				continue;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 712
 713			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
 714			for (i = 0; i < 4; i++) {
 715				if (pt_vaddr[pte + i] != scratch_pte)
 716					seq_printf(m, " %08x", pt_vaddr[pte + i]);
 717				else
 718					seq_puts(m, "  SCRATCH ");
 719			}
 720			seq_puts(m, "\n");
 
 
 721		}
 722		kunmap_atomic(pt_vaddr);
 723	}
 
 
 724}
 725
 726static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
 727{
 728	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
 729	gen6_gtt_pte_t __iomem *pd_addr;
 730	uint32_t pd_entry;
 731	int i;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 732
 733	WARN_ON(ppgtt->pd_offset & 0x3f);
 734	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
 735		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
 736	for (i = 0; i < ppgtt->num_pd_entries; i++) {
 737		dma_addr_t pt_addr;
 738
 739		pt_addr = ppgtt->pt_dma_addr[i];
 740		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
 741		pd_entry |= GEN6_PDE_VALID;
 742
 743		writel(pd_entry, pd_addr + i);
 744	}
 745	readl(pd_addr);
 746}
 747
 748static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 
 
 
 749{
 750	BUG_ON(ppgtt->pd_offset & 0x3f);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 751
 752	return (ppgtt->pd_offset / 64) << 16;
 
 753}
 754
 755static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 756			 struct intel_ring_buffer *ring,
 757			 bool synchronous)
 758{
 759	struct drm_device *dev = ppgtt->base.dev;
 760	struct drm_i915_private *dev_priv = dev->dev_private;
 761	int ret;
 
 762
 763	/* If we're in reset, we can assume the GPU is sufficiently idle to
 764	 * manually frob these bits. Ideally we could use the ring functions,
 765	 * except our error handling makes it quite difficult (can't use
 766	 * intel_ring_begin, ring->flush, or intel_ring_advance)
 767	 *
 768	 * FIXME: We should try not to special case reset
 769	 */
 770	if (synchronous ||
 771	    i915_reset_in_progress(&dev_priv->gpu_error)) {
 772		WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
 773		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
 774		I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
 775		POSTING_READ(RING_PP_DIR_BASE(ring));
 
 
 
 
 776		return 0;
 777	}
 778
 779	/* NB: TLBs must be flushed and invalidated before a switch */
 780	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 781	if (ret)
 782		return ret;
 783
 784	ret = intel_ring_begin(ring, 6);
 785	if (ret)
 786		return ret;
 
 
 
 
 787
 788	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
 789	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
 790	intel_ring_emit(ring, PP_DIR_DCLV_2G);
 791	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
 792	intel_ring_emit(ring, get_pd_offset(ppgtt));
 793	intel_ring_emit(ring, MI_NOOP);
 794	intel_ring_advance(ring);
 795
 796	return 0;
 
 
 
 
 797}
 798
 799static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 800			  struct intel_ring_buffer *ring,
 801			  bool synchronous)
 802{
 803	struct drm_device *dev = ppgtt->base.dev;
 804	struct drm_i915_private *dev_priv = dev->dev_private;
 805	int ret;
 806
 807	/* If we're in reset, we can assume the GPU is sufficiently idle to
 808	 * manually frob these bits. Ideally we could use the ring functions,
 809	 * except our error handling makes it quite difficult (can't use
 810	 * intel_ring_begin, ring->flush, or intel_ring_advance)
 811	 *
 812	 * FIXME: We should try not to special case reset
 813	 */
 814	if (synchronous ||
 815	    i915_reset_in_progress(&dev_priv->gpu_error)) {
 816		WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
 817		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
 818		I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
 819		POSTING_READ(RING_PP_DIR_BASE(ring));
 820		return 0;
 821	}
 822
 823	/* NB: TLBs must be flushed and invalidated before a switch */
 824	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 825	if (ret)
 826		return ret;
 827
 828	ret = intel_ring_begin(ring, 6);
 829	if (ret)
 830		return ret;
 831
 832	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
 833	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
 834	intel_ring_emit(ring, PP_DIR_DCLV_2G);
 835	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
 836	intel_ring_emit(ring, get_pd_offset(ppgtt));
 837	intel_ring_emit(ring, MI_NOOP);
 838	intel_ring_advance(ring);
 839
 840	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 841	if (ring->id != RCS) {
 842		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 843		if (ret)
 844			return ret;
 845	}
 846
 847	return 0;
 848}
 849
 850static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
 851			  struct intel_ring_buffer *ring,
 852			  bool synchronous)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 853{
 854	struct drm_device *dev = ppgtt->base.dev;
 855	struct drm_i915_private *dev_priv = dev->dev_private;
 856
 857	if (!synchronous)
 858		return 0;
 859
 860	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
 861	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
 
 862
 863	POSTING_READ(RING_PP_DIR_DCLV(ring));
 
 
 
 864
 865	return 0;
 
 
 866}
 867
 868static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
 
 
 
 
 
 
 
 869{
 870	struct drm_device *dev = ppgtt->base.dev;
 871	struct drm_i915_private *dev_priv = dev->dev_private;
 872	struct intel_ring_buffer *ring;
 873	int j, ret;
 874
 875	for_each_ring(ring, dev_priv, j) {
 876		I915_WRITE(RING_MODE_GEN7(ring),
 877			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
 
 
 878
 879		/* We promise to do a switch later with FULL PPGTT. If this is
 880		 * aliasing, this is the one and only switch we'll do */
 881		if (USES_FULL_PPGTT(dev))
 882			continue;
 
 
 
 
 
 
 
 
 
 883
 884		ret = ppgtt->switch_mm(ppgtt, ring, true);
 885		if (ret)
 886			goto err_out;
 
 
 
 
 
 
 
 
 
 
 
 
 
 887	}
 888
 889	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 890
 891err_out:
 892	for_each_ring(ring, dev_priv, j)
 893		I915_WRITE(RING_MODE_GEN7(ring),
 894			   _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
 895	return ret;
 
 
 
 896}
 897
 898static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
 899{
 900	struct drm_device *dev = ppgtt->base.dev;
 901	struct drm_i915_private *dev_priv = dev->dev_private;
 902	struct intel_ring_buffer *ring;
 903	uint32_t ecochk, ecobits;
 904	int i;
 905
 906	ecobits = I915_READ(GAC_ECO_BITS);
 907	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
 908
 909	ecochk = I915_READ(GAM_ECOCHK);
 910	if (IS_HASWELL(dev)) {
 911		ecochk |= ECOCHK_PPGTT_WB_HSW;
 912	} else {
 913		ecochk |= ECOCHK_PPGTT_LLC_IVB;
 914		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
 915	}
 916	I915_WRITE(GAM_ECOCHK, ecochk);
 917
 918	for_each_ring(ring, dev_priv, i) {
 919		int ret;
 920		/* GFX_MODE is per-ring on gen7+ */
 921		I915_WRITE(RING_MODE_GEN7(ring),
 922			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 923
 924		/* We promise to do a switch later with FULL PPGTT. If this is
 925		 * aliasing, this is the one and only switch we'll do */
 926		if (USES_FULL_PPGTT(dev))
 927			continue;
 928
 929		ret = ppgtt->switch_mm(ppgtt, ring, true);
 930		if (ret)
 931			return ret;
 932	}
 933
 934	return 0;
 935}
 936
 937static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
 938{
 939	struct drm_device *dev = ppgtt->base.dev;
 940	struct drm_i915_private *dev_priv = dev->dev_private;
 941	struct intel_ring_buffer *ring;
 942	uint32_t ecochk, gab_ctl, ecobits;
 943	int i;
 944
 945	ecobits = I915_READ(GAC_ECO_BITS);
 946	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
 947		   ECOBITS_PPGTT_CACHE64B);
 948
 949	gab_ctl = I915_READ(GAB_CTL);
 950	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
 
 
 951
 952	ecochk = I915_READ(GAM_ECOCHK);
 953	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
 
 
 954
 955	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
 
 
 956
 957	for_each_ring(ring, dev_priv, i) {
 958		int ret = ppgtt->switch_mm(ppgtt, ring, true);
 959		if (ret)
 960			return ret;
 961	}
 962
 963	return 0;
 964}
 965
 966/* PPGTT support for Sandybdrige/Gen6 and later */
 967static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 968				   uint64_t start,
 969				   uint64_t length,
 970				   bool use_scratch)
 971{
 972	struct i915_hw_ppgtt *ppgtt =
 973		container_of(vm, struct i915_hw_ppgtt, base);
 974	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
 975	unsigned first_entry = start >> PAGE_SHIFT;
 976	unsigned num_entries = length >> PAGE_SHIFT;
 977	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
 978	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 979	unsigned last_pte, i;
 980
 981	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
 982
 983	while (num_entries) {
 984		last_pte = first_pte + num_entries;
 985		if (last_pte > I915_PPGTT_PT_ENTRIES)
 986			last_pte = I915_PPGTT_PT_ENTRIES;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 987
 988		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
 
 
 989
 990		for (i = first_pte; i < last_pte; i++)
 991			pt_vaddr[i] = scratch_pte;
 992
 993		kunmap_atomic(pt_vaddr);
 994
 995		num_entries -= last_pte - first_pte;
 996		first_pte = 0;
 997		act_pt++;
 998	}
 999}
1000
1001static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1002				      struct sg_table *pages,
1003				      uint64_t start,
1004				      enum i915_cache_level cache_level)
1005{
1006	struct i915_hw_ppgtt *ppgtt =
1007		container_of(vm, struct i915_hw_ppgtt, base);
1008	gen6_gtt_pte_t *pt_vaddr;
1009	unsigned first_entry = start >> PAGE_SHIFT;
1010	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
1011	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
1012	struct sg_page_iter sg_iter;
1013
1014	pt_vaddr = NULL;
1015	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
1016		if (pt_vaddr == NULL)
1017			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
1018
1019		pt_vaddr[act_pte] =
1020			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
1021				       cache_level, true);
1022		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
1023			kunmap_atomic(pt_vaddr);
1024			pt_vaddr = NULL;
1025			act_pt++;
 
 
 
 
 
 
 
 
1026			act_pte = 0;
1027		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1028	}
1029	if (pt_vaddr)
1030		kunmap_atomic(pt_vaddr);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1031}
1032
1033static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
1034{
1035	int i;
 
 
 
 
 
 
 
 
 
 
1036
1037	if (ppgtt->pt_dma_addr) {
1038		for (i = 0; i < ppgtt->num_pd_entries; i++)
1039			pci_unmap_page(ppgtt->base.dev->pdev,
1040				       ppgtt->pt_dma_addr[i],
1041				       4096, PCI_DMA_BIDIRECTIONAL);
1042	}
 
 
 
 
 
1043}
1044
1045static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
1046{
1047	int i;
 
 
 
 
1048
1049	kfree(ppgtt->pt_dma_addr);
1050	for (i = 0; i < ppgtt->num_pd_entries; i++)
1051		__free_page(ppgtt->pt_pages[i]);
1052	kfree(ppgtt->pt_pages);
1053}
1054
1055static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1056{
1057	struct i915_hw_ppgtt *ppgtt =
1058		container_of(vm, struct i915_hw_ppgtt, base);
1059
1060	list_del(&vm->global_link);
1061	drm_mm_takedown(&ppgtt->base.mm);
1062	drm_mm_remove_node(&ppgtt->node);
 
1063
1064	gen6_ppgtt_unmap_pages(ppgtt);
1065	gen6_ppgtt_free(ppgtt);
 
1066}
1067
1068static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
1069{
1070#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
1071#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
1072	struct drm_device *dev = ppgtt->base.dev;
1073	struct drm_i915_private *dev_priv = dev->dev_private;
1074	bool retried = false;
1075	int ret;
1076
1077	/* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
1078	 * allocator works in address space sizes, so it's multiplied by page
1079	 * size. We allocate at the top of the GTT to avoid fragmentation.
1080	 */
1081	BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
1082alloc:
1083	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
1084						  &ppgtt->node, GEN6_PD_SIZE,
1085						  GEN6_PD_ALIGN, 0,
1086						  0, dev_priv->gtt.base.total,
1087						  DRM_MM_SEARCH_DEFAULT,
1088						  DRM_MM_CREATE_DEFAULT);
1089	if (ret == -ENOSPC && !retried) {
1090		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
1091					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
1092					       I915_CACHE_NONE,
1093					       0, dev_priv->gtt.base.total,
1094					       0);
1095		if (ret)
1096			return ret;
1097
1098		retried = true;
1099		goto alloc;
1100	}
1101
1102	if (ppgtt->node.start < dev_priv->gtt.mappable_end)
1103		DRM_DEBUG("Forced to use aperture for PDEs\n");
1104
1105	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
1106	return ret;
1107}
1108
1109static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
 
 
1110{
1111	int i;
 
 
 
 
1112
1113	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
1114				  GFP_KERNEL);
1115
1116	if (!ppgtt->pt_pages)
1117		return -ENOMEM;
1118
1119	for (i = 0; i < ppgtt->num_pd_entries; i++) {
1120		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
1121		if (!ppgtt->pt_pages[i]) {
1122			gen6_ppgtt_free(ppgtt);
1123			return -ENOMEM;
1124		}
1125	}
1126
1127	return 0;
1128}
1129
1130static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
1131{
1132	int ret;
 
 
 
 
 
1133
1134	ret = gen6_ppgtt_allocate_page_directories(ppgtt);
1135	if (ret)
1136		return ret;
1137
1138	ret = gen6_ppgtt_allocate_page_tables(ppgtt);
1139	if (ret) {
1140		drm_mm_remove_node(&ppgtt->node);
1141		return ret;
1142	}
1143
1144	ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
1145				     GFP_KERNEL);
1146	if (!ppgtt->pt_dma_addr) {
1147		drm_mm_remove_node(&ppgtt->node);
1148		gen6_ppgtt_free(ppgtt);
1149		return -ENOMEM;
1150	}
1151
1152	return 0;
1153}
1154
1155static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
 
 
 
 
 
 
 
1156{
1157	struct drm_device *dev = ppgtt->base.dev;
1158	int i;
 
1159
1160	for (i = 0; i < ppgtt->num_pd_entries; i++) {
1161		dma_addr_t pt_addr;
1162
1163		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
1164				       PCI_DMA_BIDIRECTIONAL);
 
1165
1166		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
1167			gen6_ppgtt_unmap_pages(ppgtt);
1168			return -EIO;
1169		}
1170
1171		ppgtt->pt_dma_addr[i] = pt_addr;
1172	}
 
 
 
 
 
 
 
 
 
 
 
 
 
1173
1174	return 0;
1175}
1176
1177static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1178{
1179	struct drm_device *dev = ppgtt->base.dev;
1180	struct drm_i915_private *dev_priv = dev->dev_private;
1181	int ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1182
1183	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
1184	if (IS_GEN6(dev)) {
1185		ppgtt->enable = gen6_ppgtt_enable;
1186		ppgtt->switch_mm = gen6_mm_switch;
1187	} else if (IS_HASWELL(dev)) {
1188		ppgtt->enable = gen7_ppgtt_enable;
1189		ppgtt->switch_mm = hsw_mm_switch;
1190	} else if (IS_GEN7(dev)) {
1191		ppgtt->enable = gen7_ppgtt_enable;
1192		ppgtt->switch_mm = gen7_mm_switch;
1193	} else
1194		BUG();
1195
1196	ret = gen6_ppgtt_alloc(ppgtt);
1197	if (ret)
1198		return ret;
 
1199
1200	ret = gen6_ppgtt_setup_page_tables(ppgtt);
1201	if (ret) {
1202		gen6_ppgtt_free(ppgtt);
1203		return ret;
1204	}
1205
1206	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
1207	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
1208	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
1209	ppgtt->base.start = 0;
1210	ppgtt->base.total =  ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
1211	ppgtt->debug_dump = gen6_dump_ppgtt;
1212
1213	ppgtt->pd_offset =
1214		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
1215
1216	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
 
 
1217
1218	DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
1219			 ppgtt->node.size >> 20,
1220			 ppgtt->node.start / PAGE_SIZE);
1221
1222	return 0;
 
1223}
1224
1225int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
1226{
1227	struct drm_i915_private *dev_priv = dev->dev_private;
1228	int ret = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
1229
1230	ppgtt->base.dev = dev;
1231	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
1232
1233	if (INTEL_INFO(dev)->gen < 8)
1234		ret = gen6_ppgtt_init(ppgtt);
1235	else if (IS_GEN8(dev))
1236		ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
1237	else
1238		BUG();
 
 
 
1239
1240	if (!ret) {
1241		struct drm_i915_private *dev_priv = dev->dev_private;
1242		kref_init(&ppgtt->ref);
1243		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
1244			    ppgtt->base.total);
1245		i915_init_vm(dev_priv, &ppgtt->base);
1246		if (INTEL_INFO(dev)->gen < 8) {
1247			gen6_write_pdes(ppgtt);
1248			DRM_DEBUG("Adding PPGTT at offset %x\n",
1249				  ppgtt->pd_offset << 10);
1250		}
1251	}
1252
1253	return ret;
 
 
 
 
 
 
 
 
1254}
1255
1256static void
1257ppgtt_bind_vma(struct i915_vma *vma,
1258	       enum i915_cache_level cache_level,
1259	       u32 flags)
1260{
1261	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
1262				cache_level);
1263}
1264
1265static void ppgtt_unbind_vma(struct i915_vma *vma)
1266{
1267	vma->vm->clear_range(vma->vm,
1268			     vma->node.start,
1269			     vma->obj->base.size,
1270			     true);
1271}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1272
1273extern int intel_iommu_gfx_mapped;
1274/* Certain Gen5 chipsets require require idling the GPU before
1275 * unmapping anything from the GTT when VT-d is enabled.
1276 */
1277static inline bool needs_idle_maps(struct drm_device *dev)
1278{
1279#ifdef CONFIG_INTEL_IOMMU
1280	/* Query intel_iommu to see if we need the workaround. Presumably that
1281	 * was loaded first.
 
1282	 */
1283	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
1284		return true;
1285#endif
1286	return false;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1287}
1288
1289static bool do_idling(struct drm_i915_private *dev_priv)
1290{
1291	bool ret = dev_priv->mm.interruptible;
 
 
1292
1293	if (unlikely(dev_priv->gtt.do_idle_maps)) {
1294		dev_priv->mm.interruptible = false;
1295		if (i915_gpu_idle(dev_priv->dev)) {
1296			DRM_ERROR("Couldn't idle GPU\n");
1297			/* Wait a bit, in hopes it avoids the hang */
1298			udelay(10);
1299		}
1300	}
1301
1302	return ret;
1303}
1304
1305static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
 
1306{
1307	if (unlikely(dev_priv->gtt.do_idle_maps))
1308		dev_priv->mm.interruptible = interruptible;
 
 
1309}
1310
1311void i915_check_and_clear_faults(struct drm_device *dev)
 
1312{
1313	struct drm_i915_private *dev_priv = dev->dev_private;
1314	struct intel_ring_buffer *ring;
1315	int i;
 
 
 
 
1316
1317	if (INTEL_INFO(dev)->gen < 6)
1318		return;
1319
1320	for_each_ring(ring, dev_priv, i) {
1321		u32 fault_reg;
1322		fault_reg = I915_READ(RING_FAULT_REG(ring));
1323		if (fault_reg & RING_FAULT_VALID) {
1324			DRM_DEBUG_DRIVER("Unexpected fault\n"
1325					 "\tAddr: 0x%08lx\\n"
1326					 "\tAddress space: %s\n"
1327					 "\tSource ID: %d\n"
1328					 "\tType: %d\n",
1329					 fault_reg & PAGE_MASK,
1330					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
1331					 RING_FAULT_SRCID(fault_reg),
1332					 RING_FAULT_FAULT_TYPE(fault_reg));
1333			I915_WRITE(RING_FAULT_REG(ring),
1334				   fault_reg & ~RING_FAULT_VALID);
1335		}
1336	}
1337	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
1338}
1339
1340void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
1341{
1342	struct drm_i915_private *dev_priv = dev->dev_private;
1343
1344	/* Don't bother messing with faults pre GEN6 as we have little
1345	 * documentation supporting that it's a good idea.
1346	 */
1347	if (INTEL_INFO(dev)->gen < 6)
1348		return;
1349
1350	i915_check_and_clear_faults(dev);
1351
1352	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
1353				       dev_priv->gtt.base.start,
1354				       dev_priv->gtt.base.total,
1355				       true);
1356}
1357
1358void i915_gem_restore_gtt_mappings(struct drm_device *dev)
1359{
1360	struct drm_i915_private *dev_priv = dev->dev_private;
1361	struct drm_i915_gem_object *obj;
1362	struct i915_address_space *vm;
1363
1364	i915_check_and_clear_faults(dev);
 
 
 
 
 
 
 
 
1365
1366	/* First fill our portion of the GTT with scratch pages */
1367	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
1368				       dev_priv->gtt.base.start,
1369				       dev_priv->gtt.base.total,
1370				       true);
1371
1372	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1373		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
1374							   &dev_priv->gtt.base);
1375		if (!vma)
1376			continue;
1377
1378		i915_gem_clflush_object(obj, obj->pin_display);
1379		/* The bind_vma code tries to be smart about tracking mappings.
1380		 * Unfortunately above, we've just wiped out the mappings
1381		 * without telling our object about it. So we need to fake it.
1382		 */
1383		obj->has_global_gtt_mapping = 0;
1384		vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
1385	}
 
 
1386
 
 
1387
1388	if (INTEL_INFO(dev)->gen >= 8) {
1389		gen8_setup_private_ppat(dev_priv);
1390		return;
1391	}
1392
1393	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
1394		/* TODO: Perhaps it shouldn't be gen6 specific */
1395		if (i915_is_ggtt(vm)) {
1396			if (dev_priv->mm.aliasing_ppgtt)
1397				gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
1398			continue;
1399		}
 
 
1400
1401		gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
1402	}
1403
1404	i915_gem_chipset_flush(dev);
1405}
1406
1407int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 
 
 
1408{
1409	if (obj->has_dma_mapping)
1410		return 0;
 
 
 
 
 
 
 
 
1411
1412	if (!dma_map_sg(&obj->base.dev->pdev->dev,
1413			obj->pages->sgl, obj->pages->nents,
1414			PCI_DMA_BIDIRECTIONAL))
1415		return -ENOSPC;
1416
1417	return 0;
 
 
 
 
1418}
1419
1420static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
 
 
 
 
1421{
1422#ifdef writeq
1423	writeq(pte, addr);
1424#else
1425	iowrite32((u32)pte, addr);
1426	iowrite32(pte >> 32, addr + 4);
1427#endif
1428}
1429
1430static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
1431				     struct sg_table *st,
1432				     uint64_t start,
1433				     enum i915_cache_level level)
1434{
1435	struct drm_i915_private *dev_priv = vm->dev->dev_private;
1436	unsigned first_entry = start >> PAGE_SHIFT;
1437	gen8_gtt_pte_t __iomem *gtt_entries =
1438		(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
1439	int i = 0;
1440	struct sg_page_iter sg_iter;
1441	dma_addr_t addr;
1442
1443	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
1444		addr = sg_dma_address(sg_iter.sg) +
1445			(sg_iter.sg_pgoffset << PAGE_SHIFT);
1446		gen8_set_pte(&gtt_entries[i],
1447			     gen8_pte_encode(addr, level, true));
1448		i++;
1449	}
1450
1451	/*
1452	 * XXX: This serves as a posting read to make sure that the PTE has
1453	 * actually been updated. There is some concern that even though
1454	 * registers and PTEs are within the same BAR that they are potentially
1455	 * of NUMA access patterns. Therefore, even with the way we assume
1456	 * hardware should work, we must keep this posting read for paranoia.
1457	 */
1458	if (i != 0)
1459		WARN_ON(readq(&gtt_entries[i-1])
1460			!= gen8_pte_encode(addr, level, true));
1461
1462	/* This next bit makes the above posting read even more important. We
1463	 * want to flush the TLBs only after we're certain all the PTE updates
1464	 * have finished.
1465	 */
1466	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
1467	POSTING_READ(GFX_FLSH_CNTL_GEN6);
1468}
1469
1470/*
1471 * Binds an object into the global gtt with the specified cache level. The object
1472 * will be accessible to the GPU via commands whose operands reference offsets
1473 * within the global GTT as well as accessible by the GPU through the GMADR
1474 * mapped BAR (dev_priv->mm.gtt->gtt).
1475 */
1476static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
1477				     struct sg_table *st,
1478				     uint64_t start,
1479				     enum i915_cache_level level)
1480{
1481	struct drm_i915_private *dev_priv = vm->dev->dev_private;
1482	unsigned first_entry = start >> PAGE_SHIFT;
1483	gen6_gtt_pte_t __iomem *gtt_entries =
1484		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
1485	int i = 0;
1486	struct sg_page_iter sg_iter;
1487	dma_addr_t addr;
 
 
1488
1489	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
1490		addr = sg_page_iter_dma_address(&sg_iter);
1491		iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
1492		i++;
1493	}
1494
1495	/* XXX: This serves as a posting read to make sure that the PTE has
1496	 * actually been updated. There is some concern that even though
1497	 * registers and PTEs are within the same BAR that they are potentially
1498	 * of NUMA access patterns. Therefore, even with the way we assume
1499	 * hardware should work, we must keep this posting read for paranoia.
1500	 */
1501	if (i != 0)
1502		WARN_ON(readl(&gtt_entries[i-1]) !=
1503			vm->pte_encode(addr, level, true));
1504
1505	/* This next bit makes the above posting read even more important. We
1506	 * want to flush the TLBs only after we're certain all the PTE updates
1507	 * have finished.
1508	 */
1509	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
1510	POSTING_READ(GFX_FLSH_CNTL_GEN6);
 
 
 
 
1511}
1512
1513static void gen8_ggtt_clear_range(struct i915_address_space *vm,
1514				  uint64_t start,
1515				  uint64_t length,
1516				  bool use_scratch)
1517{
1518	struct drm_i915_private *dev_priv = vm->dev->dev_private;
1519	unsigned first_entry = start >> PAGE_SHIFT;
1520	unsigned num_entries = length >> PAGE_SHIFT;
1521	gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
1522		(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
1523	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
1524	int i;
1525
1526	if (WARN(num_entries > max_entries,
1527		 "First entry = %d; Num entries = %d (max=%d)\n",
1528		 first_entry, num_entries, max_entries))
1529		num_entries = max_entries;
1530
1531	scratch_pte = gen8_pte_encode(vm->scratch.addr,
1532				      I915_CACHE_LLC,
1533				      use_scratch);
1534	for (i = 0; i < num_entries; i++)
1535		gen8_set_pte(&gtt_base[i], scratch_pte);
1536	readl(gtt_base);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1537}
1538
1539static void gen6_ggtt_clear_range(struct i915_address_space *vm,
1540				  uint64_t start,
1541				  uint64_t length,
1542				  bool use_scratch)
1543{
1544	struct drm_i915_private *dev_priv = vm->dev->dev_private;
1545	unsigned first_entry = start >> PAGE_SHIFT;
1546	unsigned num_entries = length >> PAGE_SHIFT;
1547	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
1548		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
1549	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
1550	int i;
1551
1552	if (WARN(num_entries > max_entries,
1553		 "First entry = %d; Num entries = %d (max=%d)\n",
1554		 first_entry, num_entries, max_entries))
1555		num_entries = max_entries;
1556
1557	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
1558
1559	for (i = 0; i < num_entries; i++)
1560		iowrite32(scratch_pte, &gtt_base[i]);
1561	readl(gtt_base);
1562}
1563
 
 
 
 
 
 
 
 
 
 
 
1564
1565static void i915_ggtt_bind_vma(struct i915_vma *vma,
1566			       enum i915_cache_level cache_level,
1567			       u32 unused)
 
1568{
1569	const unsigned long entry = vma->node.start >> PAGE_SHIFT;
1570	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
1571		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
1572
1573	BUG_ON(!i915_is_ggtt(vma->vm));
1574	intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
1575	vma->obj->has_global_gtt_mapping = 1;
1576}
1577
1578static void i915_ggtt_clear_range(struct i915_address_space *vm,
1579				  uint64_t start,
1580				  uint64_t length,
1581				  bool unused)
 
 
 
 
 
1582{
1583	unsigned first_entry = start >> PAGE_SHIFT;
1584	unsigned num_entries = length >> PAGE_SHIFT;
1585	intel_gtt_clear_range(first_entry, num_entries);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1586}
1587
1588static void i915_ggtt_unbind_vma(struct i915_vma *vma)
1589{
1590	const unsigned int first = vma->node.start >> PAGE_SHIFT;
1591	const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
1592
1593	BUG_ON(!i915_is_ggtt(vma->vm));
1594	vma->obj->has_global_gtt_mapping = 0;
1595	intel_gtt_clear_range(first, size);
1596}
1597
1598static void ggtt_bind_vma(struct i915_vma *vma,
1599			  enum i915_cache_level cache_level,
1600			  u32 flags)
1601{
1602	struct drm_device *dev = vma->vm->dev;
1603	struct drm_i915_private *dev_priv = dev->dev_private;
1604	struct drm_i915_gem_object *obj = vma->obj;
1605
1606	/* If there is no aliasing PPGTT, or the caller needs a global mapping,
1607	 * or we have a global mapping already but the cacheability flags have
1608	 * changed, set the global PTEs.
1609	 *
1610	 * If there is an aliasing PPGTT it is anecdotally faster, so use that
1611	 * instead if none of the above hold true.
1612	 *
1613	 * NB: A global mapping should only be needed for special regions like
1614	 * "gtt mappable", SNB errata, or if specified via special execbuf
1615	 * flags. At all other times, the GPU will use the aliasing PPGTT.
1616	 */
1617	if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
1618		if (!obj->has_global_gtt_mapping ||
1619		    (cache_level != obj->cache_level)) {
1620			vma->vm->insert_entries(vma->vm, obj->pages,
1621						vma->node.start,
1622						cache_level);
1623			obj->has_global_gtt_mapping = 1;
1624		}
1625	}
1626
1627	if (dev_priv->mm.aliasing_ppgtt &&
1628	    (!obj->has_aliasing_ppgtt_mapping ||
1629	     (cache_level != obj->cache_level))) {
1630		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
1631		appgtt->base.insert_entries(&appgtt->base,
1632					    vma->obj->pages,
1633					    vma->node.start,
1634					    cache_level);
1635		vma->obj->has_aliasing_ppgtt_mapping = 1;
1636	}
1637}
1638
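/*
 * Undo ggtt_bind_vma(): clear the global GTT range and, if an aliasing PPGTT
 * mapping was also made, clear that too (both fall back to the scratch page).
 */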
1639static void ggtt_unbind_vma(struct i915_vma *vma)
1640{
1641	struct drm_device *dev = vma->vm->dev;
1642	struct drm_i915_private *dev_priv = dev->dev_private;
1643	struct drm_i915_gem_object *obj = vma->obj;
1644
1645	if (obj->has_global_gtt_mapping) {
1646		vma->vm->clear_range(vma->vm,
1647				     vma->node.start,
1648				     obj->base.size,
1649				     true);
1650		obj->has_global_gtt_mapping = 0;
1651	}
1652
1653	if (obj->has_aliasing_ppgtt_mapping) {
1654		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
1655		appgtt->base.clear_range(&appgtt->base,
1656					 vma->node.start,
1657					 obj->base.size,
1658					 true);
1659		obj->has_aliasing_ppgtt_mapping = 0;
1660	}
1661}
1662
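/*
 * Release the object's DMA mapping. On platforms that require idle maps the
 * GPU is quiesced around the unmap (see do_idling()/undo_idling()).
 */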
1663void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
1664{
1665	struct drm_device *dev = obj->base.dev;
1666	struct drm_i915_private *dev_priv = dev->dev_private;
1667	bool interruptible;
1668
1669	interruptible = do_idling(dev_priv);
1670
1671	if (!obj->has_dma_mapping)
1672		dma_unmap_sg(&dev->pdev->dev,
1673			     obj->pages->sgl, obj->pages->nents,
1674			     PCI_DMA_BIDIRECTIONAL);
1675
1676	undo_idling(dev_priv, interruptible);
1677}
1678
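/*
 * Cache colouring for the GTT allocator: keep a one-page gap between nodes
 * whose cache "color" differs from their neighbour's. This hook is only
 * installed on !HAS_LLC platforms, presumably so that GTT prefetching never
 * straddles objects with different caching attributes.
 */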
1679static void i915_gtt_color_adjust(struct drm_mm_node *node,
1680				  unsigned long color,
1681				  unsigned long *start,
1682				  unsigned long *end)
1683{
1684	if (node->color != color)
1685		*start += 4096;
1686
1687	if (!list_empty(&node->node_list)) {
1688		node = list_entry(node->node_list.next,
1689				  struct drm_mm_node,
1690				  node_list);
1691		if (node->allocated && node->color != color)
1692			*end -= 4096;
1693	}
1694}
1695
1696void i915_gem_setup_global_gtt(struct drm_device *dev,
1697			       unsigned long start,
1698			       unsigned long mappable_end,
1699			       unsigned long end)
1700{
1701	/* Let GEM manage all of the aperture.
1702	 *
1703	 * However, leave one page at the end still bound to the scratch page.
1704	 * There are a number of places where the hardware apparently prefetches
1705	 * past the end of the object, and we've seen multiple hangs with the
1706	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
1707	 * aperture.  One page should be enough to keep any prefetching inside
1708	 * of the aperture.
1709	 */
1710	struct drm_i915_private *dev_priv = dev->dev_private;
1711	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
1712	struct drm_mm_node *entry;
1713	struct drm_i915_gem_object *obj;
1714	unsigned long hole_start, hole_end;
1715
1716	BUG_ON(mappable_end > end);
1717
1718	/* Subtract the guard page ... */
1719	drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
1720	if (!HAS_LLC(dev))
1721		dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
1722
1723	/* Mark any preallocated objects as occupied */
1724	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1725		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
1726		int ret;
1727		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
1728			      i915_gem_obj_ggtt_offset(obj), obj->base.size);
1729
1730		WARN_ON(i915_gem_obj_ggtt_bound(obj));
1731		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
1732		if (ret)
1733			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
1734		obj->has_global_gtt_mapping = 1;
1735	}
1736
1737	dev_priv->gtt.base.start = start;
1738	dev_priv->gtt.base.total = end - start;
1739
1740	/* Clear any non-preallocated blocks */
1741	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
1742		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
1743			      hole_start, hole_end);
1744		ggtt_vm->clear_range(ggtt_vm, hole_start,
1745				     hole_end - hole_start, true);
1746	}
1747
1748	/* And finally clear the reserved guard page */
1749	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
1750}
1751
1752void i915_gem_init_global_gtt(struct drm_device *dev)
1753{
1754	struct drm_i915_private *dev_priv = dev->dev_private;
1755	unsigned long gtt_size, mappable_size;
1756
1757	gtt_size = dev_priv->gtt.base.total;
1758	mappable_size = dev_priv->gtt.mappable_end;
1759
1760	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
1761}
1762
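/*
 * The scratch page is a single uncached, DMA-mapped page that cleared or
 * unused GTT entries are pointed at (see the *_clear_range() callbacks), so
 * stray accesses hit harmless memory.
 */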
1763static int setup_scratch_page(struct drm_device *dev)
1764{
1765	struct drm_i915_private *dev_priv = dev->dev_private;
1766	struct page *page;
1767	dma_addr_t dma_addr;
1768
1769	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
1770	if (page == NULL)
1771		return -ENOMEM;
1772	get_page(page);
1773	set_pages_uc(page, 1);
1774
1775#ifdef CONFIG_INTEL_IOMMU
1776	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
1777				PCI_DMA_BIDIRECTIONAL);
1778	if (pci_dma_mapping_error(dev->pdev, dma_addr)) {
		/* Undo set_pages_uc() and the extra reference taken above,
		 * otherwise the scratch page leaks on this error path. */
		set_pages_wb(page, 1);
		put_page(page);
		__free_page(page);
1779		return -EINVAL;
	}
1780#else
1781	dma_addr = page_to_phys(page);
1782#endif
1783	dev_priv->gtt.base.scratch.page = page;
1784	dev_priv->gtt.base.scratch.addr = dma_addr;
1785
1786	return 0;
1787}
1788
1789static void teardown_scratch_page(struct drm_device *dev)
1790{
1791	struct drm_i915_private *dev_priv = dev->dev_private;
1792	struct page *page = dev_priv->gtt.base.scratch.page;
1793
1794	set_pages_wb(page, 1);
1795	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
1796		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
1797	put_page(page);
1798	__free_page(page);
1799}
1800
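/*
 * Decode the GMCH control word: the GGMS field gives the GTT size (gen6/7
 * store it directly in MB, gen8 as a power of two in MB), while the GMS field
 * gives the stolen-memory size in 32 MB units.
 */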
1801static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
1802{
1803	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
1804	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
1805	return snb_gmch_ctl << 20;
1806}
1807
1808static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
1809{
1810	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
1811	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
1812	if (bdw_gmch_ctl)
1813		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
1814	return bdw_gmch_ctl << 20;
1815}
1816
1817static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
1818{
1819	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
1820	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
1821	return snb_gmch_ctl << 25; /* 32 MB units */
1822}
1823
1824static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
1825{
1826	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
1827	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
1828	return bdw_gmch_ctl << 25; /* 32 MB units */
1829}
1830
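/*
 * Common gen6+ GGTT setup: ioremap the GSM (the PTE array, which lives in the
 * upper half of the MMIO BAR) write-combined and allocate the scratch page.
 */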
1831static int ggtt_probe_common(struct drm_device *dev,
1832			     size_t gtt_size)
1833{
1834	struct drm_i915_private *dev_priv = dev->dev_private;
1835	phys_addr_t gtt_phys_addr;
1836	int ret;
1837
1838	/* For Modern GENs the PTEs and register space are split in the BAR */
1839	gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
1840		(pci_resource_len(dev->pdev, 0) / 2);
1841
1842	dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
1843	if (!dev_priv->gtt.gsm) {
1844		DRM_ERROR("Failed to map the gtt page table\n");
1845		return -ENOMEM;
1846	}
1847
1848	ret = setup_scratch_page(dev);
1849	if (ret) {
1850		DRM_ERROR("Scratch setup failed\n");
1851		/* iounmap will also get called at remove, but meh */
1852		iounmap(dev_priv->gtt.gsm);
1853	}
1854
1855	return ret;
1856}
1857
1858/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
1859 * bits. When using advanced contexts each context stores its own PAT, but
1860 * writing this data shouldn't be harmful even in those cases. */
1861static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
1862{
1863#define GEN8_PPAT_UC		(0<<0)
1864#define GEN8_PPAT_WC		(1<<0)
1865#define GEN8_PPAT_WT		(2<<0)
1866#define GEN8_PPAT_WB		(3<<0)
1867#define GEN8_PPAT_ELLC_OVERRIDE	(0<<2)
1868/* FIXME(BDW): Bspec is completely confused about cache control bits. */
1869#define GEN8_PPAT_LLC		(1<<2)
1870#define GEN8_PPAT_LLCELLC	(2<<2)
1871#define GEN8_PPAT_LLCeLLC	(3<<2)
1872#define GEN8_PPAT_AGE(x)	(x<<4)
1873#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
1874	uint64_t pat;
1875
1876	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
1877	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
1878	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
1879	      GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
1880	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
1881	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
1882	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
1883	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
1884
1885	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
1886	 * write would work. */
1887	I915_WRITE(GEN8_PRIVATE_PAT, pat);
1888	I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
1889}
1890
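/*
 * Gen8 probe: sizes come from the GMCH control word in PCI config space, the
 * DMA mask is widened to 39 bits, the private PPAT is programmed, and the
 * gen8 PTE insert/clear callbacks are installed.
 */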
1891static int gen8_gmch_probe(struct drm_device *dev,
1892			   size_t *gtt_total,
1893			   size_t *stolen,
1894			   phys_addr_t *mappable_base,
1895			   unsigned long *mappable_end)
1896{
1897	struct drm_i915_private *dev_priv = dev->dev_private;
1898	unsigned int gtt_size;
1899	u16 snb_gmch_ctl;
1900	int ret;
1901
1902	/* TODO: We're not aware of mappable constraints on gen8 yet */
1903	*mappable_base = pci_resource_start(dev->pdev, 2);
1904	*mappable_end = pci_resource_len(dev->pdev, 2);
1905
1906	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
1907		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
1908
1909	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1910
1911	*stolen = gen8_get_stolen_size(snb_gmch_ctl);
1912
1913	gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
1914	*gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
1915
1916	gen8_setup_private_ppat(dev_priv);
1917
1918	ret = ggtt_probe_common(dev, gtt_size);
1919
1920	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
1921	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
1922
1923	return ret;
1924}
1925
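/*
 * Gen6/7 probe: like gen8 but with a 40-bit DMA mask, the gen6 PTE callbacks,
 * and a sanity check that the GMADR aperture size looks plausible.
 */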
1926static int gen6_gmch_probe(struct drm_device *dev,
1927			   size_t *gtt_total,
1928			   size_t *stolen,
1929			   phys_addr_t *mappable_base,
1930			   unsigned long *mappable_end)
1931{
1932	struct drm_i915_private *dev_priv = dev->dev_private;
1933	unsigned int gtt_size;
1934	u16 snb_gmch_ctl;
1935	int ret;
1936
1937	*mappable_base = pci_resource_start(dev->pdev, 2);
1938	*mappable_end = pci_resource_len(dev->pdev, 2);
1939
1940	/* 64/512MB is the current min/max we actually know of, but this is just
1941	 * a coarse sanity check.
1942	 */
1943	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
1944		DRM_ERROR("Unknown GMADR size (%lx)\n",
1945			  *mappable_end);
1946		return -ENXIO;
1947	}
1948
1949	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
1950		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
1951	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1952
1953	*stolen = gen6_get_stolen_size(snb_gmch_ctl);
1954
1955	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
1956	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
1957
1958	ret = ggtt_probe_common(dev, gtt_size);
1959
1960	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
1961	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
1962
1963	return ret;
1964}
1965
1966static void gen6_gmch_remove(struct i915_address_space *vm)
1967{
1969	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
1970
1971	drm_mm_takedown(&vm->mm);
1972	iounmap(gtt->gsm);
1973	teardown_scratch_page(vm->dev);
1974}
1975
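/*
 * Gen2-5 probe: defer entirely to the intel-gtt (GMCH) driver, which reports
 * the aperture and stolen sizes via intel_gtt_get().
 */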
1976static int i915_gmch_probe(struct drm_device *dev,
1977			   size_t *gtt_total,
1978			   size_t *stolen,
1979			   phys_addr_t *mappable_base,
1980			   unsigned long *mappable_end)
1981{
1982	struct drm_i915_private *dev_priv = dev->dev_private;
1983	int ret;
1984
1985	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
1986	if (!ret) {
1987		DRM_ERROR("failed to set up gmch\n");
1988		return -EIO;
1989	}
1990
1991	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
1992
1993	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
1994	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
1995
1996	if (unlikely(dev_priv->gtt.do_idle_maps))
1997		DRM_INFO("applying Ironlake quirks for intel_iommu\n");
1998
1999	return 0;
2000}
2001
2002static void i915_gmch_remove(struct i915_address_space *vm)
2003{
2004	intel_gmch_remove();
2005}
2006
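/*
 * Probe the global GTT: install the per-generation probe/cleanup and PTE
 * encode callbacks, run the probe, then validate the requested PPGTT mode
 * against what the hardware and driver support.
 */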
2007int i915_gem_gtt_init(struct drm_device *dev)
2008{
2009	struct drm_i915_private *dev_priv = dev->dev_private;
2010	struct i915_gtt *gtt = &dev_priv->gtt;
2011	int ret;
2012
2013	if (INTEL_INFO(dev)->gen <= 5) {
2014		gtt->gtt_probe = i915_gmch_probe;
2015		gtt->base.cleanup = i915_gmch_remove;
2016	} else if (INTEL_INFO(dev)->gen < 8) {
2017		gtt->gtt_probe = gen6_gmch_probe;
2018		gtt->base.cleanup = gen6_gmch_remove;
2019		if (IS_HASWELL(dev) && dev_priv->ellc_size)
2020			gtt->base.pte_encode = iris_pte_encode;
2021		else if (IS_HASWELL(dev))
2022			gtt->base.pte_encode = hsw_pte_encode;
2023		else if (IS_VALLEYVIEW(dev))
2024			gtt->base.pte_encode = byt_pte_encode;
2025		else if (INTEL_INFO(dev)->gen >= 7)
2026			gtt->base.pte_encode = ivb_pte_encode;
2027		else
2028			gtt->base.pte_encode = snb_pte_encode;
2029	} else {
2030		gtt->gtt_probe = gen8_gmch_probe;
2031		gtt->base.cleanup = gen6_gmch_remove;
2032	}
2033
2034	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
2035			     &gtt->mappable_base, &gtt->mappable_end);
2036	if (ret)
2037		return ret;
2038
2039	gtt->base.dev = dev;
2040
2041	/* GMADR is the PCI mmio aperture into the global GTT. */
2042	DRM_INFO("Memory usable by graphics device = %zdM\n",
2043		 gtt->base.total >> 20);
2044	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
2045	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
2046	/*
2047	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
2048	 * user's requested state against the hardware/driver capabilities.  We
2049	 * do this now so that we can print out any log messages once rather
2050	 * than every time we check intel_enable_ppgtt().
2051	 */
2052	i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
2053	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
2054
2055	return 0;
2056}
2057
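/*
 * Allocate a VMA linking @obj into @vm and pick the bind/unbind callbacks by
 * GPU generation (gen6+ GGTT/PPGTT paths vs the legacy gen2-5 GGTT-only path).
 */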
2058static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
2059					      struct i915_address_space *vm)
2060{
2061	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
2062	if (vma == NULL)
2063		return ERR_PTR(-ENOMEM);
2064
2065	INIT_LIST_HEAD(&vma->vma_link);
2066	INIT_LIST_HEAD(&vma->mm_list);
2067	INIT_LIST_HEAD(&vma->exec_list);
2068	vma->vm = vm;
2069	vma->obj = obj;
2070
2071	switch (INTEL_INFO(vm->dev)->gen) {
2072	case 8:
2073	case 7:
2074	case 6:
2075		if (i915_is_ggtt(vm)) {
2076			vma->unbind_vma = ggtt_unbind_vma;
2077			vma->bind_vma = ggtt_bind_vma;
2078		} else {
2079			vma->unbind_vma = ppgtt_unbind_vma;
2080			vma->bind_vma = ppgtt_bind_vma;
2081		}
2082		break;
2083	case 5:
2084	case 4:
2085	case 3:
2086	case 2:
2087		BUG_ON(!i915_is_ggtt(vm));
2088		vma->unbind_vma = i915_ggtt_unbind_vma;
2089		vma->bind_vma = i915_ggtt_bind_vma;
2090		break;
2091	default:
2092		BUG();
2093	}
2094
2095	/* Keep GGTT vmas first to make debug easier */
2096	if (i915_is_ggtt(vm))
2097		list_add(&vma->vma_link, &obj->vma_list);
2098	else
2099		list_add_tail(&vma->vma_link, &obj->vma_list);
2100
2101	return vma;
2102}
2103
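/*
 * Return the existing VMA for the (@obj, @vm) pair, creating one if none
 * exists yet. May return an ERR_PTR if the allocation fails.
 */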
2104struct i915_vma *
2105i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
2106				  struct i915_address_space *vm)
2107{
2108	struct i915_vma *vma;
2109
2110	vma = i915_gem_obj_to_vma(obj, vm);
2111	if (!vma)
2112		vma = __i915_gem_vma_create(obj, vm);
2113
2114	return vma;
2115}