   1/*
   2 * Copyright © 2010 Daniel Vetter
   3 * Copyright © 2011-2014 Intel Corporation
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice (including the next
  13 * paragraph) shall be included in all copies or substantial portions of the
  14 * Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  22 * IN THE SOFTWARE.
  23 *
  24 */
  25
  26#include <linux/seq_file.h>
  27#include <linux/stop_machine.h>
  28#include <drm/drmP.h>
  29#include <drm/i915_drm.h>
  30#include "i915_drv.h"
  31#include "i915_vgpu.h"
  32#include "i915_trace.h"
  33#include "intel_drv.h"
  34
  35/**
  36 * DOC: Global GTT views
  37 *
  38 * Background and previous state
  39 *
  40 * Historically objects could exist (be bound) in global GTT space only as
  41 * singular instances with a view representing all of the object's backing pages
  42 * in a linear fashion. This view will be called a normal view.
  43 *
  44 * To support multiple views of the same object, where the number of mapped
  45 * pages is not equal to the backing store, or where the layout of the pages
  46 * is not linear, the concept of a GGTT view was added.
  47 *
  48 * One example of an alternative view is a stereo display driven by a single
  49 * image. In this case we would have a framebuffer looking like this
  50 * (2x2 pages):
  51 *
  52 *    12
  53 *    34
  54 *
  55 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
  56 * rendering. In contrast, fed to the display engine would be an alternative
  57 * view which could look something like this:
  58 *
  59 *   1212
  60 *   3434
  61 *
  62 * In this example both the size and layout of pages in the alternative view are
  63 * different from the normal view.
  64 *
  65 * Implementation and usage
  66 *
  67 * GGTT views are implemented using VMAs and are distinguished via enum
  68 * i915_ggtt_view_type and struct i915_ggtt_view.
  69 *
  70 * A new flavour of core GEM functions which work with GGTT bound objects was
  71 * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
  72 * renaming in large amounts of code. They take the struct i915_ggtt_view
  73 * parameter encapsulating all metadata required to implement a view.
  74 *
  75 * As a helper for callers which are only interested in the normal view,
  76 * a globally const i915_ggtt_view_normal singleton instance exists. All old core
  77 * GEM API functions, the ones not taking the view parameter, operate on the
  78 * normal GGTT view.
  79 *
  80 * Code wanting to add or use a new GGTT view needs to:
  81 *
  82 * 1. Add a new enum with a suitable name.
  83 * 2. Extend the metadata in the i915_ggtt_view structure if required.
  84 * 3. Add support to i915_get_ggtt_vma_pages().
  85 *
  86 * New views are required to build a scatter-gather table from within the
  87 * i915_get_ggtt_vma_pages function. This table is stored in the vma.ggtt_view and
  88 * exists for the lifetime of a VMA.
  89 *
  90 * The core API is designed to have copy semantics, which means that a passed-in
  91 * struct i915_ggtt_view does not need to be persistent (left around after
  92 * calling the core API functions).
  93 *
  94 */
  95
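/*
 * A minimal usage sketch: a caller wanting a specific view fills in a
 * struct i915_ggtt_view (at minimum its .type) and hands it to the
 * _ggtt_/_view flavoured GEM calls described above. Because the core API
 * has copy semantics, the structure can live on the caller's stack:
 *
 *	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_ROTATED };
 *	... pass &view to the view-taking GEM call ...
 *
 * Callers that only need the normal view can instead use the global
 * i915_ggtt_view_normal singleton defined below.
 */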
  96static int
  97i915_get_ggtt_vma_pages(struct i915_vma *vma);
  98
  99const struct i915_ggtt_view i915_ggtt_view_normal = {
 100	.type = I915_GGTT_VIEW_NORMAL,
 101};
 102const struct i915_ggtt_view i915_ggtt_view_rotated = {
 103	.type = I915_GGTT_VIEW_ROTATED,
 104};
 105
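/*
 * Sanitize the requested enable_ppgtt value against what this device (and
 * any active vGPU or VT-d configuration) can actually support. The return
 * value is the effective PPGTT mode: 0 = disabled, 1 = aliasing PPGTT,
 * 2 = full PPGTT, 3 = full 48bit PPGTT.
 */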
 106static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 107{
 108	bool has_aliasing_ppgtt;
 109	bool has_full_ppgtt;
 110	bool has_full_48bit_ppgtt;
 111
 112	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
 113	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
 114	has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
 115
 116	if (intel_vgpu_active(dev))
 117		has_full_ppgtt = false; /* emulation is too hard */
 118
 119	/*
 120	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
 121	 * execlists, the sole mechanism available to submit work.
 122	 */
 123	if (INTEL_INFO(dev)->gen < 9 &&
 124	    (enable_ppgtt == 0 || !has_aliasing_ppgtt))
 125		return 0;
 126
 127	if (enable_ppgtt == 1)
 128		return 1;
 129
 130	if (enable_ppgtt == 2 && has_full_ppgtt)
 131		return 2;
 132
 133	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
 134		return 3;
 135
 136#ifdef CONFIG_INTEL_IOMMU
 137	/* Disable ppgtt on SNB if VT-d is on. */
 138	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
 139		DRM_INFO("Disabling PPGTT because VT-d is on\n");
 140		return 0;
 141	}
 142#endif
 143
  144	/* Early (pre-B3 stepping) VLV doesn't support PPGTT */
 145	if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
 146		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
 147		return 0;
 148	}
 149
 150	if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
 151		return has_full_48bit_ppgtt ? 3 : 2;
 152	else
 153		return has_aliasing_ppgtt ? 1 : 0;
 154}
 155
 156static int ppgtt_bind_vma(struct i915_vma *vma,
 157			  enum i915_cache_level cache_level,
 158			  u32 unused)
 159{
 160	u32 pte_flags = 0;
 
 161
 162	/* Currently applicable only to VLV */
 163	if (vma->obj->gt_ro)
 164		pte_flags |= PTE_READ_ONLY;
 165
 166	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
 167				cache_level, pte_flags);
 168
 169	return 0;
 170}
 171
 172static void ppgtt_unbind_vma(struct i915_vma *vma)
 173{
 174	vma->vm->clear_range(vma->vm,
 175			     vma->node.start,
 176			     vma->obj->base.size,
 177			     true);
 178}
 179
 180static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
 181				  enum i915_cache_level level,
 182				  bool valid)
 183{
 184	gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
 185	pte |= addr;
 186
 187	switch (level) {
 188	case I915_CACHE_NONE:
 189		pte |= PPAT_UNCACHED_INDEX;
 190		break;
 191	case I915_CACHE_WT:
 192		pte |= PPAT_DISPLAY_ELLC_INDEX;
 193		break;
 194	default:
 195		pte |= PPAT_CACHED_INDEX;
 196		break;
 197	}
 198
 199	return pte;
 200}
 201
 202static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
 203				  const enum i915_cache_level level)
 204{
 205	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
 206	pde |= addr;
 207	if (level != I915_CACHE_NONE)
 208		pde |= PPAT_CACHED_PDE_INDEX;
 209	else
 210		pde |= PPAT_UNCACHED_INDEX;
 211	return pde;
 212}
 213
 214#define gen8_pdpe_encode gen8_pde_encode
 215#define gen8_pml4e_encode gen8_pde_encode
 216
 217static gen6_pte_t snb_pte_encode(dma_addr_t addr,
 218				 enum i915_cache_level level,
 219				 bool valid, u32 unused)
 220{
 221	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 222	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 223
 224	switch (level) {
 225	case I915_CACHE_L3_LLC:
 226	case I915_CACHE_LLC:
 227		pte |= GEN6_PTE_CACHE_LLC;
 228		break;
 229	case I915_CACHE_NONE:
 230		pte |= GEN6_PTE_UNCACHED;
 231		break;
 232	default:
 233		MISSING_CASE(level);
 234	}
 235
 236	return pte;
 237}
 238
 239static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
 240				 enum i915_cache_level level,
 241				 bool valid, u32 unused)
 242{
 243	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 244	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 245
 246	switch (level) {
 247	case I915_CACHE_L3_LLC:
 248		pte |= GEN7_PTE_CACHE_L3_LLC;
 249		break;
 250	case I915_CACHE_LLC:
 251		pte |= GEN6_PTE_CACHE_LLC;
 252		break;
 253	case I915_CACHE_NONE:
 254		pte |= GEN6_PTE_UNCACHED;
 255		break;
 256	default:
 257		MISSING_CASE(level);
 258	}
 259
 260	return pte;
 261}
 262
 263static gen6_pte_t byt_pte_encode(dma_addr_t addr,
 264				 enum i915_cache_level level,
 265				 bool valid, u32 flags)
 266{
 267	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 268	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 269
 270	if (!(flags & PTE_READ_ONLY))
 271		pte |= BYT_PTE_WRITEABLE;
 272
 273	if (level != I915_CACHE_NONE)
 274		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
 275
 276	return pte;
 277}
 278
 279static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
 280				 enum i915_cache_level level,
 281				 bool valid, u32 unused)
 282{
 283	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 284	pte |= HSW_PTE_ADDR_ENCODE(addr);
 285
 286	if (level != I915_CACHE_NONE)
 287		pte |= HSW_WB_LLC_AGE3;
 288
 289	return pte;
 290}
 291
 292static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 293				  enum i915_cache_level level,
 294				  bool valid, u32 unused)
 295{
 296	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
 297	pte |= HSW_PTE_ADDR_ENCODE(addr);
 298
 299	switch (level) {
 300	case I915_CACHE_NONE:
 301		break;
 302	case I915_CACHE_WT:
 303		pte |= HSW_WT_ELLC_LLC_AGE3;
 304		break;
 305	default:
 306		pte |= HSW_WB_ELLC_LLC_AGE3;
 307		break;
 308	}
 309
 310	return pte;
 311}
 312
 313static int __setup_page_dma(struct drm_device *dev,
 314			    struct i915_page_dma *p, gfp_t flags)
 315{
 316	struct device *device = &dev->pdev->dev;
 317
 318	p->page = alloc_page(flags);
 319	if (!p->page)
 320		return -ENOMEM;
 321
 322	p->daddr = dma_map_page(device,
 323				p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
 324
 325	if (dma_mapping_error(device, p->daddr)) {
 326		__free_page(p->page);
 327		return -EINVAL;
 328	}
 329
 330	return 0;
 331}
 332
 333static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 334{
 335	return __setup_page_dma(dev, p, GFP_KERNEL);
 336}
 337
 338static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 339{
 340	if (WARN_ON(!p->page))
 341		return;
 342
 343	dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
 344	__free_page(p->page);
 345	memset(p, 0, sizeof(*p));
 346}
 347
 348static void *kmap_page_dma(struct i915_page_dma *p)
 349{
 350	return kmap_atomic(p->page);
 351}
 352
 353/* We use the flushing unmap only with ppgtt structures:
 354 * page directories, page tables and scratch pages.
 355 */
 356static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
 357{
  358	/* There are only a few exceptions for gen >= 6: chv and bxt.
  359	 * And we are not sure about the latter, so play safe for now.
 360	 */
 361	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
 362		drm_clflush_virt_range(vaddr, PAGE_SIZE);
 363
 364	kunmap_atomic(vaddr);
 365}
 366
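/*
 * The px_* wrappers below work on any paging structure (page table, page
 * directory, pdp or pml4) by resolving the embedded struct i915_page_dma
 * through px_base()/px_page(), so a single set of setup/cleanup/fill/kmap
 * helpers serves every level of the page table hierarchy.
 */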
 367#define kmap_px(px) kmap_page_dma(px_base(px))
 368#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
 369
 370#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
 371#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
 372#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
 373#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
 374
 375static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
 376			  const uint64_t val)
 377{
 378	int i;
 379	uint64_t * const vaddr = kmap_page_dma(p);
 380
 381	for (i = 0; i < 512; i++)
 382		vaddr[i] = val;
 383
 384	kunmap_page_dma(dev, vaddr);
 385}
 
 386
 387static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
 388			     const uint32_t val32)
 389{
 390	uint64_t v = val32;
 391
 392	v = v << 32 | val32;
 393
 394	fill_page_dma(dev, p, v);
 395}
 396
 397static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
 398{
 399	struct i915_page_scratch *sp;
 400	int ret;
 401
 402	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
 403	if (sp == NULL)
 404		return ERR_PTR(-ENOMEM);
 405
 406	ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
 407	if (ret) {
 408		kfree(sp);
 409		return ERR_PTR(ret);
 410	}
 411
 412	set_pages_uc(px_page(sp), 1);
 413
 414	return sp;
 415}
 416
 417static void free_scratch_page(struct drm_device *dev,
 418			      struct i915_page_scratch *sp)
 419{
 420	set_pages_wb(px_page(sp), 1);
 
 421
 422	cleanup_px(dev, sp);
 423	kfree(sp);
 424}
 425
 426static struct i915_page_table *alloc_pt(struct drm_device *dev)
 
 427{
 428	struct i915_page_table *pt;
 429	const size_t count = INTEL_INFO(dev)->gen >= 8 ?
 430		GEN8_PTES : GEN6_PTES;
 431	int ret = -ENOMEM;
 432
 433	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
 434	if (!pt)
 435		return ERR_PTR(-ENOMEM);
 436
 437	pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
 438				GFP_KERNEL);
 439
 440	if (!pt->used_ptes)
 441		goto fail_bitmap;
 442
 443	ret = setup_px(dev, pt);
 444	if (ret)
 445		goto fail_page_m;
 446
 447	return pt;
 
 448
 449fail_page_m:
 450	kfree(pt->used_ptes);
 451fail_bitmap:
 452	kfree(pt);
 453
 454	return ERR_PTR(ret);
 455}
 456
 457static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
 458{
 459	cleanup_px(dev, pt);
 460	kfree(pt->used_ptes);
 461	kfree(pt);
 462}
 463
 464static void gen8_initialize_pt(struct i915_address_space *vm,
 465			       struct i915_page_table *pt)
 466{
 467	gen8_pte_t scratch_pte;
 468
 469	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
 470				      I915_CACHE_LLC, true);
 471
 472	fill_px(vm->dev, pt, scratch_pte);
 473}
 474
 475static void gen6_initialize_pt(struct i915_address_space *vm,
 476			       struct i915_page_table *pt)
 477{
 478	gen6_pte_t scratch_pte;
 479
 480	WARN_ON(px_dma(vm->scratch_page) == 0);
 481
 482	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
 483				     I915_CACHE_LLC, true, 0);
 484
 485	fill32_px(vm->dev, pt, scratch_pte);
 486}
 487
 488static struct i915_page_directory *alloc_pd(struct drm_device *dev)
 489{
 490	struct i915_page_directory *pd;
 491	int ret = -ENOMEM;
 492
 493	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 494	if (!pd)
 495		return ERR_PTR(-ENOMEM);
 496
 497	pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
 498				sizeof(*pd->used_pdes), GFP_KERNEL);
 499	if (!pd->used_pdes)
 500		goto fail_bitmap;
 501
 502	ret = setup_px(dev, pd);
 503	if (ret)
 504		goto fail_page_m;
 505
 506	return pd;
 507
 508fail_page_m:
 509	kfree(pd->used_pdes);
 510fail_bitmap:
 511	kfree(pd);
 512
 513	return ERR_PTR(ret);
 514}
 515
 516static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
 517{
 518	if (px_page(pd)) {
 519		cleanup_px(dev, pd);
 520		kfree(pd->used_pdes);
 521		kfree(pd);
 522	}
 523}
 524
 525static void gen8_initialize_pd(struct i915_address_space *vm,
 526			       struct i915_page_directory *pd)
 527{
 528	gen8_pde_t scratch_pde;
 529
 530	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
 531
 532	fill_px(vm->dev, pd, scratch_pde);
 
 533}
 534
 535static int __pdp_init(struct drm_device *dev,
 536		      struct i915_page_directory_pointer *pdp)
 
 537{
 538	size_t pdpes = I915_PDPES_PER_PDP(dev);
 539
 540	pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
 541				  sizeof(unsigned long),
 542				  GFP_KERNEL);
 543	if (!pdp->used_pdpes)
 544		return -ENOMEM;
 545
 546	pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
 547				      GFP_KERNEL);
 548	if (!pdp->page_directory) {
 549		kfree(pdp->used_pdpes);
  550		/* The PDP might be the statically allocated top level. Keep it
 551		 * as clean as possible */
 552		pdp->used_pdpes = NULL;
 553		return -ENOMEM;
 554	}
 555
 556	return 0;
 557}
 558
 559static void __pdp_fini(struct i915_page_directory_pointer *pdp)
 
 560{
 561	kfree(pdp->used_pdpes);
 562	kfree(pdp->page_directory);
 563	pdp->page_directory = NULL;
 564}
 565
 566static struct
 567i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
 568{
 569	struct i915_page_directory_pointer *pdp;
 570	int ret = -ENOMEM;
 571
 572	WARN_ON(!USES_FULL_48BIT_PPGTT(dev));
 573
 574	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
 575	if (!pdp)
 576		return ERR_PTR(-ENOMEM);
 577
 578	ret = __pdp_init(dev, pdp);
 579	if (ret)
 580		goto fail_bitmap;
 581
 582	ret = setup_px(dev, pdp);
 583	if (ret)
 584		goto fail_page_m;
 585
 586	return pdp;
 587
 588fail_page_m:
 589	__pdp_fini(pdp);
 590fail_bitmap:
 591	kfree(pdp);
 592
 593	return ERR_PTR(ret);
 594}
 595
 596static void free_pdp(struct drm_device *dev,
 597		     struct i915_page_directory_pointer *pdp)
 598{
 599	__pdp_fini(pdp);
 600	if (USES_FULL_48BIT_PPGTT(dev)) {
 601		cleanup_px(dev, pdp);
 602		kfree(pdp);
 603	}
 604}
 605
 606static void gen8_initialize_pdp(struct i915_address_space *vm,
 607				struct i915_page_directory_pointer *pdp)
 608{
 609	gen8_ppgtt_pdpe_t scratch_pdpe;
 610
 611	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
 612
 613	fill_px(vm->dev, pdp, scratch_pdpe);
 614}
 615
 616static void gen8_initialize_pml4(struct i915_address_space *vm,
 617				 struct i915_pml4 *pml4)
 618{
 619	gen8_ppgtt_pml4e_t scratch_pml4e;
 620
 621	scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
 622					  I915_CACHE_LLC);
 623
 624	fill_px(vm->dev, pml4, scratch_pml4e);
 625}
 626
 627static void
 628gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
 629			  struct i915_page_directory_pointer *pdp,
 630			  struct i915_page_directory *pd,
 631			  int index)
 632{
 633	gen8_ppgtt_pdpe_t *page_directorypo;
 634
 635	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
 636		return;
 637
 638	page_directorypo = kmap_px(pdp);
 639	page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
 640	kunmap_px(ppgtt, page_directorypo);
 641}
 642
 643static void
 644gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
 645				  struct i915_pml4 *pml4,
 646				  struct i915_page_directory_pointer *pdp,
 647				  int index)
 648{
 649	gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
 
 650
 651	WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
 652	pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
 653	kunmap_px(ppgtt, pagemap);
 654}
 655
 656/* Broadwell Page Directory Pointer Descriptors */
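/*
 * Each descriptor is loaded with two MI_LOAD_REGISTER_IMM commands that
 * write the upper and lower dwords of the page directory address into the
 * ring's PDP registers for the given entry: entries 0-3 in legacy 32b mode,
 * entry 0 only (pointing at the PML4) in 48b mode.
 */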
 657static int gen8_write_pdp(struct drm_i915_gem_request *req,
 658			  unsigned entry,
 659			  dma_addr_t addr)
 660{
 661	struct intel_engine_cs *ring = req->ring;
 662	int ret;
 663
 664	BUG_ON(entry >= 4);
 665
 666	ret = intel_ring_begin(req, 6);
 667	if (ret)
 668		return ret;
 669
 670	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 671	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
 672	intel_ring_emit(ring, upper_32_bits(addr));
 673	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 674	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
 675	intel_ring_emit(ring, lower_32_bits(addr));
 676	intel_ring_advance(ring);
 677
 678	return 0;
 679}
 680
 681static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
 682				 struct drm_i915_gem_request *req)
 683{
 684	int i, ret;
 685
 686	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 687		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
 688
 689		ret = gen8_write_pdp(req, i, pd_daddr);
 690		if (ret)
 691			return ret;
 692	}
 693
 694	return 0;
 695}
 696
 697static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
 698			      struct drm_i915_gem_request *req)
 699{
 700	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
 701}
 702
 703static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
 704				       struct i915_page_directory_pointer *pdp,
 705				       uint64_t start,
 706				       uint64_t length,
 707				       gen8_pte_t scratch_pte)
 708{
 709	struct i915_hw_ppgtt *ppgtt =
 710		container_of(vm, struct i915_hw_ppgtt, base);
 711	gen8_pte_t *pt_vaddr;
 712	unsigned pdpe = gen8_pdpe_index(start);
 713	unsigned pde = gen8_pde_index(start);
 714	unsigned pte = gen8_pte_index(start);
 715	unsigned num_entries = length >> PAGE_SHIFT;
 716	unsigned last_pte, i;
 717
 718	if (WARN_ON(!pdp))
 719		return;
 720
 721	while (num_entries) {
 722		struct i915_page_directory *pd;
 723		struct i915_page_table *pt;
 724
 725		if (WARN_ON(!pdp->page_directory[pdpe]))
 726			break;
 727
 728		pd = pdp->page_directory[pdpe];
 729
 730		if (WARN_ON(!pd->page_table[pde]))
 731			break;
 732
 733		pt = pd->page_table[pde];
 734
 735		if (WARN_ON(!px_page(pt)))
 736			break;
 737
 738		last_pte = pte + num_entries;
 739		if (last_pte > GEN8_PTES)
 740			last_pte = GEN8_PTES;
 741
 742		pt_vaddr = kmap_px(pt);
 743
 744		for (i = pte; i < last_pte; i++) {
 745			pt_vaddr[i] = scratch_pte;
 746			num_entries--;
 747		}
 748
 749		kunmap_px(ppgtt, pt);
 750
 751		pte = 0;
 752		if (++pde == I915_PDES) {
 753			if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
 754				break;
 755			pde = 0;
 756		}
 757	}
 758}
 759
 760static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 761				   uint64_t start,
 762				   uint64_t length,
 763				   bool use_scratch)
 764{
 765	struct i915_hw_ppgtt *ppgtt =
 766		container_of(vm, struct i915_hw_ppgtt, base);
 767	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
 768						 I915_CACHE_LLC, use_scratch);
 769
 770	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
 771		gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
 772					   scratch_pte);
 773	} else {
 774		uint64_t pml4e;
 775		struct i915_page_directory_pointer *pdp;
 776
 777		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
 778			gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
 779						   scratch_pte);
 780		}
 781	}
 782}
 783
 784static void
 785gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
 786			      struct i915_page_directory_pointer *pdp,
 787			      struct sg_page_iter *sg_iter,
 788			      uint64_t start,
 789			      enum i915_cache_level cache_level)
 790{
 791	struct i915_hw_ppgtt *ppgtt =
 792		container_of(vm, struct i915_hw_ppgtt, base);
 793	gen8_pte_t *pt_vaddr;
 794	unsigned pdpe = gen8_pdpe_index(start);
 795	unsigned pde = gen8_pde_index(start);
 796	unsigned pte = gen8_pte_index(start);
 797
 798	pt_vaddr = NULL;
 799
 800	while (__sg_page_iter_next(sg_iter)) {
 801		if (pt_vaddr == NULL) {
 802			struct i915_page_directory *pd = pdp->page_directory[pdpe];
 803			struct i915_page_table *pt = pd->page_table[pde];
 804			pt_vaddr = kmap_px(pt);
 805		}
 806
 807		pt_vaddr[pte] =
 808			gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
 809					cache_level, true);
 810		if (++pte == GEN8_PTES) {
 811			kunmap_px(ppgtt, pt_vaddr);
 812			pt_vaddr = NULL;
 813			if (++pde == I915_PDES) {
 814				if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
 815					break;
 816				pde = 0;
 817			}
 818			pte = 0;
 819		}
 820	}
 821
 822	if (pt_vaddr)
 823		kunmap_px(ppgtt, pt_vaddr);
 824}
 825
 826static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 827				      struct sg_table *pages,
 828				      uint64_t start,
 829				      enum i915_cache_level cache_level,
 830				      u32 unused)
 831{
 832	struct i915_hw_ppgtt *ppgtt =
 833		container_of(vm, struct i915_hw_ppgtt, base);
 834	struct sg_page_iter sg_iter;
 835
 836	__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
 837
 838	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
 839		gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
 840					      cache_level);
 841	} else {
 842		struct i915_page_directory_pointer *pdp;
 843		uint64_t pml4e;
 844		uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;
 845
 846		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
 847			gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
 848						      start, cache_level);
 849		}
 850	}
 851}
 852
 853static void gen8_free_page_tables(struct drm_device *dev,
 854				  struct i915_page_directory *pd)
 855{
 856	int i;
 
 857
 858	if (!px_page(pd))
 859		return;
 860
 861	for_each_set_bit(i, pd->used_pdes, I915_PDES) {
 862		if (WARN_ON(!pd->page_table[i]))
 863			continue;
 864
 865		free_pt(dev, pd->page_table[i]);
 866		pd->page_table[i] = NULL;
 867	}
 868}
 869
 870static int gen8_init_scratch(struct i915_address_space *vm)
 871{
 872	struct drm_device *dev = vm->dev;
 873
 874	vm->scratch_page = alloc_scratch_page(dev);
 875	if (IS_ERR(vm->scratch_page))
 876		return PTR_ERR(vm->scratch_page);
 
 877
 878	vm->scratch_pt = alloc_pt(dev);
 879	if (IS_ERR(vm->scratch_pt)) {
 880		free_scratch_page(dev, vm->scratch_page);
 881		return PTR_ERR(vm->scratch_pt);
 882	}
 883
 884	vm->scratch_pd = alloc_pd(dev);
 885	if (IS_ERR(vm->scratch_pd)) {
 886		free_pt(dev, vm->scratch_pt);
 887		free_scratch_page(dev, vm->scratch_page);
 888		return PTR_ERR(vm->scratch_pd);
 889	}
 890
 891	if (USES_FULL_48BIT_PPGTT(dev)) {
 892		vm->scratch_pdp = alloc_pdp(dev);
 893		if (IS_ERR(vm->scratch_pdp)) {
 894			free_pd(dev, vm->scratch_pd);
 895			free_pt(dev, vm->scratch_pt);
 896			free_scratch_page(dev, vm->scratch_page);
 897			return PTR_ERR(vm->scratch_pdp);
 898		}
 899	}
 
 900
 901	gen8_initialize_pt(vm, vm->scratch_pt);
 902	gen8_initialize_pd(vm, vm->scratch_pd);
 903	if (USES_FULL_48BIT_PPGTT(dev))
 904		gen8_initialize_pdp(vm, vm->scratch_pdp);
 905
 906	return 0;
 907}
 908
 909static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 910{
 
 911	enum vgt_g2v_type msg;
 912	struct drm_device *dev = ppgtt->base.dev;
 913	struct drm_i915_private *dev_priv = dev->dev_private;
 914	int i;
 915
 916	if (USES_FULL_48BIT_PPGTT(dev)) {
 917		u64 daddr = px_dma(&ppgtt->pml4);
 918
 919		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
 920		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
 921
 922		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
 923				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
 924	} else {
 925		for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
 926			u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
 927
 928			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
 929			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
 930		}
 931
 932		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
 933				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
 934	}
 935
 
 936	I915_WRITE(vgtif_reg(g2v_notify), msg);
 937
 938	return 0;
 939}
 940
 941static void gen8_free_scratch(struct i915_address_space *vm)
 942{
 943	struct drm_device *dev = vm->dev;
 944
 945	if (USES_FULL_48BIT_PPGTT(dev))
 946		free_pdp(dev, vm->scratch_pdp);
 947	free_pd(dev, vm->scratch_pd);
 948	free_pt(dev, vm->scratch_pt);
 949	free_scratch_page(dev, vm->scratch_page);
 950}
 951
 952static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
 953				    struct i915_page_directory_pointer *pdp)
 954{
 955	int i;
 956
 957	for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
 958		if (WARN_ON(!pdp->page_directory[i]))
 959			continue;
 960
 961		gen8_free_page_tables(dev, pdp->page_directory[i]);
 962		free_pd(dev, pdp->page_directory[i]);
 963	}
 
 964
 965	free_pdp(dev, pdp);
 966}
 967
 968static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
 
 969{
 970	int i;
 
 971
 972	for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
 973		if (WARN_ON(!ppgtt->pml4.pdps[i]))
 974			continue;
 975
 976		gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
 
 977	}
 978
 979	cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
 980}
 981
 982static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 983{
 984	struct i915_hw_ppgtt *ppgtt =
 985		container_of(vm, struct i915_hw_ppgtt, base);
 986
 987	if (intel_vgpu_active(vm->dev))
 988		gen8_ppgtt_notify_vgt(ppgtt, false);
 989
 990	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
 991		gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
 992	else
 993		gen8_ppgtt_cleanup_4lvl(ppgtt);
 994
 995	gen8_free_scratch(vm);
 996}
 997
 998/**
 999 * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
1000 * @vm:	Master vm structure.
1001 * @pd:	Page directory for this address range.
1002 * @start:	Starting virtual address to begin allocations.
1003 * @length:	Size of the allocations.
1004 * @new_pts:	Bitmap set by function with new allocations. Likely used by the
1005 *		caller to free on error.
1006 *
1007 * Allocate the required number of page tables. Extremely similar to
1008 * gen8_ppgtt_alloc_page_directories(). The main difference is here we are limited by
1009 * the page directory boundary (instead of the page directory pointer). That
1010 * boundary is 1GB virtual. Therefore, unlike gen8_ppgtt_alloc_page_directories(), it is
1011 * possible, and likely that the caller will need to use multiple calls of this
1012 * function to achieve the appropriate allocation.
1013 *
1014 * Return: 0 if success; negative error code otherwise.
1015 */
1016static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
1017				     struct i915_page_directory *pd,
1018				     uint64_t start,
1019				     uint64_t length,
1020				     unsigned long *new_pts)
1021{
1022	struct drm_device *dev = vm->dev;
1023	struct i915_page_table *pt;
1024	uint32_t pde;
1025
1026	gen8_for_each_pde(pt, pd, start, length, pde) {
1027		/* Don't reallocate page tables */
1028		if (test_bit(pde, pd->used_pdes)) {
1029			/* Scratch is never allocated this way */
1030			WARN_ON(pt == vm->scratch_pt);
1031			continue;
1032		}
1033
1034		pt = alloc_pt(dev);
1035		if (IS_ERR(pt))
1036			goto unwind_out;
1037
1038		gen8_initialize_pt(vm, pt);
1039		pd->page_table[pde] = pt;
1040		__set_bit(pde, new_pts);
1041		trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
1042	}
1043
1044	return 0;
1045
1046unwind_out:
1047	for_each_set_bit(pde, new_pts, I915_PDES)
1048		free_pt(dev, pd->page_table[pde]);
1049
1050	return -ENOMEM;
1051}
1052
1053/**
1054 * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
1055 * @vm:	Master vm structure.
1056 * @pdp:	Page directory pointer for this address range.
1057 * @start:	Starting virtual address to begin allocations.
1058 * @length:	Size of the allocations.
1059 * @new_pds:	Bitmap set by function with new allocations. Likely used by the
1060 *		caller to free on error.
1061 *
 1062 * Allocate the required number of page directories starting at the pdpe index of
 1063 * @start, and ending at the pdpe index @start + @length. This function will skip
1064 * over already allocated page directories within the range, and only allocate
1065 * new ones, setting the appropriate pointer within the pdp as well as the
1066 * correct position in the bitmap @new_pds.
1067 *
 1068 * The function will only allocate the pages within the range for a given page
1069 * directory pointer. In other words, if @start + @length straddles a virtually
1070 * addressed PDP boundary (512GB for 4k pages), there will be more allocations
 1071 * required by the caller. This is not currently possible, and the BUG in the
1072 * code will prevent it.
1073 *
1074 * Return: 0 if success; negative error code otherwise.
1075 */
1076static int
1077gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
1078				  struct i915_page_directory_pointer *pdp,
1079				  uint64_t start,
1080				  uint64_t length,
1081				  unsigned long *new_pds)
1082{
1083	struct drm_device *dev = vm->dev;
1084	struct i915_page_directory *pd;
1085	uint32_t pdpe;
1086	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
1087
1088	WARN_ON(!bitmap_empty(new_pds, pdpes));
1089
1090	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1091		if (test_bit(pdpe, pdp->used_pdpes))
1092			continue;
1093
1094		pd = alloc_pd(dev);
1095		if (IS_ERR(pd))
1096			goto unwind_out;
1097
1098		gen8_initialize_pd(vm, pd);
1099		pdp->page_directory[pdpe] = pd;
1100		__set_bit(pdpe, new_pds);
1101		trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
1102	}
1103
1104	return 0;
1105
1106unwind_out:
1107	for_each_set_bit(pdpe, new_pds, pdpes)
1108		free_pd(dev, pdp->page_directory[pdpe]);
1109
1110	return -ENOMEM;
 
1111}
1112
1113/**
1114 * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
1115 * @vm:	Master vm structure.
1116 * @pml4:	Page map level 4 for this address range.
1117 * @start:	Starting virtual address to begin allocations.
1118 * @length:	Size of the allocations.
1119 * @new_pdps:	Bitmap set by function with new allocations. Likely used by the
1120 *		caller to free on error.
1121 *
1122 * Allocate the required number of page directory pointers. Extremely similar to
1123 * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
1124 * The main difference is here we are limited by the pml4 boundary (instead of
1125 * the page directory pointer).
1126 *
1127 * Return: 0 if success; negative error code otherwise.
1128 */
1129static int
1130gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
1131				  struct i915_pml4 *pml4,
1132				  uint64_t start,
1133				  uint64_t length,
1134				  unsigned long *new_pdps)
1135{
1136	struct drm_device *dev = vm->dev;
1137	struct i915_page_directory_pointer *pdp;
1138	uint32_t pml4e;
1139
1140	WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
1141
1142	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1143		if (!test_bit(pml4e, pml4->used_pml4es)) {
1144			pdp = alloc_pdp(dev);
1145			if (IS_ERR(pdp))
1146				goto unwind_out;
1147
1148			gen8_initialize_pdp(vm, pdp);
1149			pml4->pdps[pml4e] = pdp;
1150			__set_bit(pml4e, new_pdps);
1151			trace_i915_page_directory_pointer_entry_alloc(vm,
1152								      pml4e,
1153								      start,
1154								      GEN8_PML4E_SHIFT);
1155		}
1156	}
1157
1158	return 0;
1159
1160unwind_out:
1161	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
1162		free_pdp(dev, pml4->pdps[pml4e]);
1163
1164	return -ENOMEM;
1165}
1166
1167static void
1168free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
1169{
1170	kfree(new_pts);
1171	kfree(new_pds);
1172}
1173
 1174/* Allocates the temporary page directory bitmap and the array of page table
 1175 * bitmaps. Both of these are sized based on the number of PDPEs in the system.
1176 */
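/*
 * new_pds holds one bit per PDPE; new_pts is an array of pdpes consecutive
 * bitmaps of I915_PDES bits each, indexed by the callers as
 * new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES).
 */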
1177static
1178int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
1179					 unsigned long **new_pts,
1180					 uint32_t pdpes)
1181{
1182	unsigned long *pds;
1183	unsigned long *pts;
1184
1185	pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
1186	if (!pds)
1187		return -ENOMEM;
1188
1189	pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
1190		      GFP_TEMPORARY);
1191	if (!pts)
1192		goto err_out;
1193
1194	*new_pds = pds;
1195	*new_pts = pts;
1196
1197	return 0;
1198
1199err_out:
1200	free_gen8_temp_bitmaps(pds, pts);
1201	return -ENOMEM;
1202}
1203
1204/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
1205 * the page table structures, we mark them dirty so that
1206 * context switching/execlist queuing code takes extra steps
1207 * to ensure that tlbs are flushed.
1208 */
1209static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
1210{
1211	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
1212}
1213
1214static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
1215				    struct i915_page_directory_pointer *pdp,
1216				    uint64_t start,
1217				    uint64_t length)
1218{
1219	struct i915_hw_ppgtt *ppgtt =
1220		container_of(vm, struct i915_hw_ppgtt, base);
1221	unsigned long *new_page_dirs, *new_page_tables;
1222	struct drm_device *dev = vm->dev;
1223	struct i915_page_directory *pd;
1224	const uint64_t orig_start = start;
1225	const uint64_t orig_length = length;
1226	uint32_t pdpe;
1227	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
1228	int ret;
1229
1230	/* Wrap is never okay since we can only represent 48b, and we don't
1231	 * actually use the other side of the canonical address space.
1232	 */
1233	if (WARN_ON(start + length < start))
1234		return -ENODEV;
1235
1236	if (WARN_ON(start + length > vm->total))
1237		return -ENODEV;
 
1238
1239	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
1240	if (ret)
1241		return ret;
1242
1243	/* Do the allocations first so we can easily bail out */
1244	ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
1245						new_page_dirs);
1246	if (ret) {
1247		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1248		return ret;
1249	}
1250
1251	/* For every page directory referenced, allocate page tables */
1252	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1253		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
1254						new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
1255		if (ret)
1256			goto err_out;
1257	}
1258
1259	start = orig_start;
1260	length = orig_length;
1261
1262	/* Allocations have completed successfully, so set the bitmaps, and do
1263	 * the mappings. */
1264	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1265		gen8_pde_t *const page_directory = kmap_px(pd);
1266		struct i915_page_table *pt;
1267		uint64_t pd_len = length;
1268		uint64_t pd_start = start;
1269		uint32_t pde;
 
1270
1271		/* Every pd should be allocated, we just did that above. */
1272		WARN_ON(!pd);
1273
1274		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1275			/* Same reasoning as pd */
1276			WARN_ON(!pt);
1277			WARN_ON(!pd_len);
1278			WARN_ON(!gen8_pte_count(pd_start, pd_len));
1279
1280			/* Set our used ptes within the page table */
1281			bitmap_set(pt->used_ptes,
1282				   gen8_pte_index(pd_start),
1283				   gen8_pte_count(pd_start, pd_len));
1284
1285			/* Our pde is now pointing to the pagetable, pt */
1286			__set_bit(pde, pd->used_pdes);
1287
1288			/* Map the PDE to the page table */
1289			page_directory[pde] = gen8_pde_encode(px_dma(pt),
1290							      I915_CACHE_LLC);
1291			trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
1292							gen8_pte_index(start),
1293							gen8_pte_count(start, length),
1294							GEN8_PTES);
1295
1296			/* NB: We haven't yet mapped ptes to pages. At this
1297			 * point we're still relying on insert_entries() */
1298		}
1299
1300		kunmap_px(ppgtt, page_directory);
1301		__set_bit(pdpe, pdp->used_pdpes);
1302		gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
1303	}
1304
1305	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1306	mark_tlbs_dirty(ppgtt);
1307	return 0;
1308
1309err_out:
1310	while (pdpe--) {
1311		unsigned long temp;
 
1312
1313		for_each_set_bit(temp, new_page_tables + pdpe *
1314				BITS_TO_LONGS(I915_PDES), I915_PDES)
1315			free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
1316	}
1317
1318	for_each_set_bit(pdpe, new_page_dirs, pdpes)
1319		free_pd(dev, pdp->page_directory[pdpe]);
 
1320
1321	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1322	mark_tlbs_dirty(ppgtt);
1323	return ret;
1324}
1325
1326static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
1327				    struct i915_pml4 *pml4,
1328				    uint64_t start,
1329				    uint64_t length)
1330{
1331	DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
1332	struct i915_hw_ppgtt *ppgtt =
1333			container_of(vm, struct i915_hw_ppgtt, base);
1334	struct i915_page_directory_pointer *pdp;
1335	uint64_t pml4e;
1336	int ret = 0;
1337
1338	/* Do the pml4 allocations first, so we don't need to track the newly
1339	 * allocated tables below the pdp */
1340	bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);
1341
 1342	/* The page directory and page table allocations are done in the shared 3
1343	 * and 4 level code. Just allocate the pdps.
1344	 */
1345	ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
1346						new_pdps);
1347	if (ret)
1348		return ret;
1349
1350	WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
1351	     "The allocation has spanned more than 512GB. "
1352	     "It is highly likely this is incorrect.");
1353
1354	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1355		WARN_ON(!pdp);
1356
1357		ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
1358		if (ret)
1359			goto err_out;
1360
1361		gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
1362	}
1363
1364	bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
1365		  GEN8_PML4ES_PER_PML4);
1366
1367	return 0;
1368
1369err_out:
1370	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
1371		gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);
1372
1373	return ret;
1374}
1375
1376static int gen8_alloc_va_range(struct i915_address_space *vm,
1377			       uint64_t start, uint64_t length)
1378{
1379	struct i915_hw_ppgtt *ppgtt =
1380		container_of(vm, struct i915_hw_ppgtt, base);
1381
1382	if (USES_FULL_48BIT_PPGTT(vm->dev))
1383		return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
1384	else
1385		return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
1386}
1387
1388static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
1389			  uint64_t start, uint64_t length,
1390			  gen8_pte_t scratch_pte,
1391			  struct seq_file *m)
1392{
1393	struct i915_page_directory *pd;
1394	uint32_t pdpe;
1395
1396	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1397		struct i915_page_table *pt;
1398		uint64_t pd_len = length;
1399		uint64_t pd_start = start;
1400		uint32_t pde;
1401
1402		if (!test_bit(pdpe, pdp->used_pdpes))
1403			continue;
1404
1405		seq_printf(m, "\tPDPE #%d\n", pdpe);
1406		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1407			uint32_t  pte;
1408			gen8_pte_t *pt_vaddr;
1409
1410			if (!test_bit(pde, pd->used_pdes))
1411				continue;
 
1412
1413			pt_vaddr = kmap_px(pt);
1414			for (pte = 0; pte < GEN8_PTES; pte += 4) {
1415				uint64_t va =
1416					(pdpe << GEN8_PDPE_SHIFT) |
1417					(pde << GEN8_PDE_SHIFT) |
1418					(pte << GEN8_PTE_SHIFT);
1419				int i;
1420				bool found = false;
1421
1422				for (i = 0; i < 4; i++)
1423					if (pt_vaddr[pte + i] != scratch_pte)
1424						found = true;
1425				if (!found)
1426					continue;
1427
1428				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
1429				for (i = 0; i < 4; i++) {
1430					if (pt_vaddr[pte + i] != scratch_pte)
1431						seq_printf(m, " %llx", pt_vaddr[pte + i]);
1432					else
1433						seq_puts(m, "  SCRATCH ");
1434				}
1435				seq_puts(m, "\n");
1436			}
1437			/* don't use kunmap_px, it could trigger
1438			 * an unnecessary flush.
1439			 */
1440			kunmap_atomic(pt_vaddr);
1441		}
1442	}
1443}
1444
1445static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1446{
1447	struct i915_address_space *vm = &ppgtt->base;
1448	uint64_t start = ppgtt->base.start;
1449	uint64_t length = ppgtt->base.total;
1450	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
1451						 I915_CACHE_LLC, true);
1452
1453	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
1454		gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
1455	} else {
1456		uint64_t pml4e;
1457		struct i915_pml4 *pml4 = &ppgtt->pml4;
1458		struct i915_page_directory_pointer *pdp;
1459
1460		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1461			if (!test_bit(pml4e, pml4->used_pml4es))
1462				continue;
1463
1464			seq_printf(m, "    PML4E #%llu\n", pml4e);
1465			gen8_dump_pdp(pdp, start, length, scratch_pte, m);
1466		}
1467	}
1468}
1469
1470static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
 
1471{
1472	unsigned long *new_page_dirs, *new_page_tables;
1473	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
1474	int ret;
1475
 1476	/* We allocate temp bitmaps for page tables for no gain,
 1477	 * but as this is for init only, let's keep things simple.
1478	 */
1479	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
1480	if (ret)
1481		return ret;
1482
1483	/* Allocate for all pdps regardless of how the ppgtt
1484	 * was defined.
1485	 */
1486	ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
1487						0, 1ULL << 32,
1488						new_page_dirs);
1489	if (!ret)
1490		*ppgtt->pdp.used_pdpes = *new_page_dirs;
1491
1492	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1493
1494	return ret;
1495}
1496
1497/*
 1498 * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
 1499 * registers, with a net effect resembling a 2-level page table in normal x86
 1500 * terms. Each PDP register maps 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of
 1501 * legacy 32b address space.
1502 *
1503 */
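/*
 * For 4K pages the numbers work out as follows: a page table holds 512 PTEs
 * (512 * 4096 = 2MB per PDE) and a page directory holds 512 PDEs (1GB per
 * PDP entry), so the 4 legacy PDPs cover the 4GB (1ULL << 32) space set up
 * below, while 48b mode chains 512-entry PDPs through a 512-entry PML4 to
 * reach 1ULL << 48.
 */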
1504static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1505{
1506	int ret;
 
1507
1508	ret = gen8_init_scratch(&ppgtt->base);
1509	if (ret)
1510		return ret;
1511
1512	ppgtt->base.start = 0;
1513	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1514	ppgtt->base.allocate_va_range = gen8_alloc_va_range;
1515	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
1516	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
1517	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1518	ppgtt->base.bind_vma = ppgtt_bind_vma;
1519	ppgtt->debug_dump = gen8_dump_ppgtt;
1520
1521	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
1522		ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
1523		if (ret)
1524			goto free_scratch;
1525
1526		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
1527
1528		ppgtt->base.total = 1ULL << 48;
1529		ppgtt->switch_mm = gen8_48b_mm_switch;
1530	} else {
1531		ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
1532		if (ret)
1533			goto free_scratch;
1534
1535		ppgtt->base.total = 1ULL << 32;
1536		ppgtt->switch_mm = gen8_legacy_mm_switch;
1537		trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
1538							      0, 0,
1539							      GEN8_PML4E_SHIFT);
1540
1541		if (intel_vgpu_active(ppgtt->base.dev)) {
1542			ret = gen8_preallocate_top_level_pdps(ppgtt);
1543			if (ret)
1544				goto free_scratch;
 
1545		}
1546	}
1547
1548	if (intel_vgpu_active(ppgtt->base.dev))
1549		gen8_ppgtt_notify_vgt(ppgtt, true);
 
1550
1551	return 0;
 
1552
1553free_scratch:
1554	gen8_free_scratch(&ppgtt->base);
1555	return ret;
1556}
1557
1558static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1559{
1560	struct i915_address_space *vm = &ppgtt->base;
1561	struct i915_page_table *unused;
1562	gen6_pte_t scratch_pte;
1563	uint32_t pd_entry;
1564	uint32_t  pte, pde, temp;
1565	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
1566
1567	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
1568				     I915_CACHE_LLC, true, 0);
1569
1570	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
1571		u32 expected;
1572		gen6_pte_t *pt_vaddr;
1573		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
1574		pd_entry = readl(ppgtt->pd_addr + pde);
1575		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
1576
1577		if (pd_entry != expected)
1578			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1579				   pde,
1580				   pd_entry,
1581				   expected);
1582		seq_printf(m, "\tPDE: %x\n", pd_entry);
1583
1584		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
1585
1586		for (pte = 0; pte < GEN6_PTES; pte+=4) {
1587			unsigned long va =
1588				(pde * PAGE_SIZE * GEN6_PTES) +
1589				(pte * PAGE_SIZE);
1590			int i;
1591			bool found = false;
1592			for (i = 0; i < 4; i++)
1593				if (pt_vaddr[pte + i] != scratch_pte)
1594					found = true;
1595			if (!found)
1596				continue;
1597
1598			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
1599			for (i = 0; i < 4; i++) {
1600				if (pt_vaddr[pte + i] != scratch_pte)
1601					seq_printf(m, " %08x", pt_vaddr[pte + i]);
1602				else
1603					seq_puts(m, "  SCRATCH ");
1604			}
1605			seq_puts(m, "\n");
1606		}
1607		kunmap_px(ppgtt, pt_vaddr);
1608	}
1609}
1610
 1611/* Write the PDE at index @pde in page directory @pd, pointing it at page table @pt */
1612static void gen6_write_pde(struct i915_page_directory *pd,
1613			    const int pde, struct i915_page_table *pt)
 
1614{
1615	/* Caller needs to make sure the write completes if necessary */
1616	struct i915_hw_ppgtt *ppgtt =
1617		container_of(pd, struct i915_hw_ppgtt, pd);
1618	u32 pd_entry;
1619
1620	pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
1621	pd_entry |= GEN6_PDE_VALID;
1622
1623	writel(pd_entry, ppgtt->pd_addr + pde);
1624}
1625
 1626/* Write PDEs for all the page tables found in the ppgtt structure, at
 1627 * incrementing page directory offsets. */
1628static void gen6_write_page_range(struct drm_i915_private *dev_priv,
1629				  struct i915_page_directory *pd,
1630				  uint32_t start, uint32_t length)
1631{
1632	struct i915_page_table *pt;
1633	uint32_t pde, temp;
1634
1635	gen6_for_each_pde(pt, pd, start, length, temp, pde)
1636		gen6_write_pde(pd, pde, pt);
1637
1638	/* Make sure write is complete before other code can use this page
 1639	 * table. Also required for WC mapped PTEs */
1640	readl(dev_priv->gtt.gsm);
1641}
1642
1643static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
1644{
1645	BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
1646
1647	return (ppgtt->pd.base.ggtt_offset / 64) << 16;
1648}
1649
1650static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
1651			 struct drm_i915_gem_request *req)
1652{
1653	struct intel_engine_cs *ring = req->ring;
1654	int ret;
1655
1656	/* NB: TLBs must be flushed and invalidated before a switch */
1657	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
1658	if (ret)
1659		return ret;
1660
1661	ret = intel_ring_begin(req, 6);
1662	if (ret)
1663		return ret;
1664
1665	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
1666	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
1667	intel_ring_emit(ring, PP_DIR_DCLV_2G);
1668	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
1669	intel_ring_emit(ring, get_pd_offset(ppgtt));
1670	intel_ring_emit(ring, MI_NOOP);
1671	intel_ring_advance(ring);
1672
1673	return 0;
1674}
1675
1676static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
1677			  struct drm_i915_gem_request *req)
1678{
1679	struct intel_engine_cs *ring = req->ring;
1680	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
1681
1682	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
1683	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
1684	return 0;
1685}
1686
1687static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
1688			  struct drm_i915_gem_request *req)
1689{
1690	struct intel_engine_cs *ring = req->ring;
1691	int ret;
1692
1693	/* NB: TLBs must be flushed and invalidated before a switch */
1694	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
1695	if (ret)
1696		return ret;
1697
1698	ret = intel_ring_begin(req, 6);
1699	if (ret)
1700		return ret;
1701
1702	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
1703	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
1704	intel_ring_emit(ring, PP_DIR_DCLV_2G);
1705	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
1706	intel_ring_emit(ring, get_pd_offset(ppgtt));
1707	intel_ring_emit(ring, MI_NOOP);
1708	intel_ring_advance(ring);
1709
1710	/* XXX: RCS is the only one to auto invalidate the TLBs? */
1711	if (ring->id != RCS) {
1712		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
1713		if (ret)
1714			return ret;
1715	}
1716
1717	return 0;
1718}
1719
1720static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
1721			  struct drm_i915_gem_request *req)
1722{
1723	struct intel_engine_cs *ring = req->ring;
1724	struct drm_device *dev = ppgtt->base.dev;
1725	struct drm_i915_private *dev_priv = dev->dev_private;
1726
1727
1728	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
1729	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
1730
1731	POSTING_READ(RING_PP_DIR_DCLV(ring));
1732
1733	return 0;
1734}
1735
1736static void gen8_ppgtt_enable(struct drm_device *dev)
1737{
1738	struct drm_i915_private *dev_priv = dev->dev_private;
1739	struct intel_engine_cs *ring;
1740	int j;
1741
1742	for_each_ring(ring, dev_priv, j) {
1743		u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
1744		I915_WRITE(RING_MODE_GEN7(ring),
1745			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
1746	}
1747}
1748
1749static void gen7_ppgtt_enable(struct drm_device *dev)
1750{
1751	struct drm_i915_private *dev_priv = dev->dev_private;
1752	struct intel_engine_cs *ring;
1753	uint32_t ecochk, ecobits;
1754	int i;
1755
1756	ecobits = I915_READ(GAC_ECO_BITS);
1757	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1758
1759	ecochk = I915_READ(GAM_ECOCHK);
1760	if (IS_HASWELL(dev)) {
1761		ecochk |= ECOCHK_PPGTT_WB_HSW;
1762	} else {
1763		ecochk |= ECOCHK_PPGTT_LLC_IVB;
1764		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1765	}
1766	I915_WRITE(GAM_ECOCHK, ecochk);
1767
1768	for_each_ring(ring, dev_priv, i) {
1769		/* GFX_MODE is per-ring on gen7+ */
1770		I915_WRITE(RING_MODE_GEN7(ring),
1771			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
1772	}
1773}
1774
1775static void gen6_ppgtt_enable(struct drm_device *dev)
1776{
1777	struct drm_i915_private *dev_priv = dev->dev_private;
1778	uint32_t ecochk, gab_ctl, ecobits;
1779
1780	ecobits = I915_READ(GAC_ECO_BITS);
1781	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1782		   ECOBITS_PPGTT_CACHE64B);
 
1783
1784	gab_ctl = I915_READ(GAB_CTL);
1785	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1786
1787	ecochk = I915_READ(GAM_ECOCHK);
1788	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1789
1790	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1791}
1792
 1793/* PPGTT support for Sandybridge/Gen6 and later */
1794static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1795				   uint64_t start,
1796				   uint64_t length,
1797				   bool use_scratch)
1798{
1799	struct i915_hw_ppgtt *ppgtt =
1800		container_of(vm, struct i915_hw_ppgtt, base);
1801	gen6_pte_t *pt_vaddr, scratch_pte;
1802	unsigned first_entry = start >> PAGE_SHIFT;
1803	unsigned num_entries = length >> PAGE_SHIFT;
1804	unsigned act_pt = first_entry / GEN6_PTES;
1805	unsigned first_pte = first_entry % GEN6_PTES;
1806	unsigned last_pte, i;
1807
1808	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
1809				     I915_CACHE_LLC, true, 0);
1810
1811	while (num_entries) {
1812		last_pte = first_pte + num_entries;
1813		if (last_pte > GEN6_PTES)
1814			last_pte = GEN6_PTES;
1815
1816		pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1817
1818		for (i = first_pte; i < last_pte; i++)
1819			pt_vaddr[i] = scratch_pte;
1820
1821		kunmap_px(ppgtt, pt_vaddr);
1822
1823		num_entries -= last_pte - first_pte;
1824		first_pte = 0;
1825		act_pt++;
1826	}
1827}
1828
1829static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1830				      struct sg_table *pages,
1831				      uint64_t start,
1832				      enum i915_cache_level cache_level, u32 flags)
1833{
1834	struct i915_hw_ppgtt *ppgtt =
1835		container_of(vm, struct i915_hw_ppgtt, base);
1836	gen6_pte_t *pt_vaddr;
1837	unsigned first_entry = start >> PAGE_SHIFT;
1838	unsigned act_pt = first_entry / GEN6_PTES;
1839	unsigned act_pte = first_entry % GEN6_PTES;
1840	struct sg_page_iter sg_iter;
1841
1842	pt_vaddr = NULL;
1843	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
1844		if (pt_vaddr == NULL)
1845			pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1846
1847		pt_vaddr[act_pte] =
1848			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
1849				       cache_level, true, flags);
1850
1851		if (++act_pte == GEN6_PTES) {
1852			kunmap_px(ppgtt, pt_vaddr);
1853			pt_vaddr = NULL;
1854			act_pt++;
1855			act_pte = 0;
1856		}
1857	}
1858	if (pt_vaddr)
1859		kunmap_px(ppgtt, pt_vaddr);
 
1860}
1861
1862static int gen6_alloc_va_range(struct i915_address_space *vm,
1863			       uint64_t start_in, uint64_t length_in)
1864{
1865	DECLARE_BITMAP(new_page_tables, I915_PDES);
1866	struct drm_device *dev = vm->dev;
1867	struct drm_i915_private *dev_priv = dev->dev_private;
1868	struct i915_hw_ppgtt *ppgtt =
1869				container_of(vm, struct i915_hw_ppgtt, base);
1870	struct i915_page_table *pt;
1871	uint32_t start, length, start_save, length_save;
1872	uint32_t pde, temp;
1873	int ret;
1874
1875	if (WARN_ON(start_in + length_in > ppgtt->base.total))
1876		return -ENODEV;
1877
1878	start = start_save = start_in;
1879	length = length_save = length_in;
1880
1881	bitmap_zero(new_page_tables, I915_PDES);
1882
1883	/* The allocation is done in two stages so that we can bail out with
1884	 * a minimal amount of pain. The first stage finds the page tables that
1885	 * still need to be allocated. The second stage marks the PTEs in use
1886	 * within those page tables.
1887	 */
1888	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
1889		if (pt != vm->scratch_pt) {
1890			WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
1891			continue;
1892		}
1893
1894		/* We've already allocated a page table */
1895		WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
1896
1897		pt = alloc_pt(dev);
1898		if (IS_ERR(pt)) {
1899			ret = PTR_ERR(pt);
1900			goto unwind_out;
1901		}
1902
1903		gen6_initialize_pt(vm, pt);
1904
1905		ppgtt->pd.page_table[pde] = pt;
1906		__set_bit(pde, new_page_tables);
1907		trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
1908	}
1909
1910	start = start_save;
1911	length = length_save;
1912
1913	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
1914		DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
1915
1916		bitmap_zero(tmp_bitmap, GEN6_PTES);
1917		bitmap_set(tmp_bitmap, gen6_pte_index(start),
1918			   gen6_pte_count(start, length));
1919
1920		if (__test_and_clear_bit(pde, new_page_tables))
1921			gen6_write_pde(&ppgtt->pd, pde, pt);
1922
1923		trace_i915_page_table_entry_map(vm, pde, pt,
1924					 gen6_pte_index(start),
1925					 gen6_pte_count(start, length),
1926					 GEN6_PTES);
1927		bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
1928				GEN6_PTES);
1929	}
1930
1931	WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));
1932
1933	/* Make sure write is complete before other code can use this page
1934	 * table. Also required for WC mapped PTEs. */
1935	readl(dev_priv->gtt.gsm);
1936
1937	mark_tlbs_dirty(ppgtt);
1938	return 0;
1939
1940unwind_out:
1941	for_each_set_bit(pde, new_page_tables, I915_PDES) {
1942		struct i915_page_table *pt = ppgtt->pd.page_table[pde];
1943
1944		ppgtt->pd.page_table[pde] = vm->scratch_pt;
1945		free_pt(vm->dev, pt);
1946	}
1947
1948	mark_tlbs_dirty(ppgtt);
1949	return ret;
1950}
1951
1952static int gen6_init_scratch(struct i915_address_space *vm)
1953{
1954	struct drm_device *dev = vm->dev;
1955
1956	vm->scratch_page = alloc_scratch_page(dev);
1957	if (IS_ERR(vm->scratch_page))
1958		return PTR_ERR(vm->scratch_page);
1959
1960	vm->scratch_pt = alloc_pt(dev);
1961	if (IS_ERR(vm->scratch_pt)) {
1962		free_scratch_page(dev, vm->scratch_page);
1963		return PTR_ERR(vm->scratch_pt);
1964	}
1965
1966	gen6_initialize_pt(vm, vm->scratch_pt);
1967
1968	return 0;
1969}
1970
1971static void gen6_free_scratch(struct i915_address_space *vm)
1972{
1973	struct drm_device *dev = vm->dev;
1974
1975	free_pt(dev, vm->scratch_pt);
1976	free_scratch_page(dev, vm->scratch_page);
1977}
1978
1979static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1980{
1981	struct i915_hw_ppgtt *ppgtt =
1982		container_of(vm, struct i915_hw_ppgtt, base);
1983	struct i915_page_table *pt;
1984	uint32_t pde;
1985
1986	drm_mm_remove_node(&ppgtt->node);
1987
1988	gen6_for_all_pdes(pt, ppgtt, pde) {
1989		if (pt != vm->scratch_pt)
1990			free_pt(ppgtt->base.dev, pt);
1991	}
1992
1993	gen6_free_scratch(vm);
1994}
1995
1996static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
1997{
1998	struct i915_address_space *vm = &ppgtt->base;
1999	struct drm_device *dev = ppgtt->base.dev;
2000	struct drm_i915_private *dev_priv = dev->dev_private;
2001	bool retried = false;
2002	int ret;
2003
2004	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
2005	 * allocator works in address space sizes, so it's multiplied by page
2006	 * size. We allocate at the top of the GTT to avoid fragmentation.
2007	 */
2008	BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
2009
2010	ret = gen6_init_scratch(vm);
2011	if (ret)
2012		return ret;
2013
2014alloc:
2015	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
2016						  &ppgtt->node, GEN6_PD_SIZE,
2017						  GEN6_PD_ALIGN, 0,
2018						  0, dev_priv->gtt.base.total,
2019						  DRM_MM_TOPDOWN);
2020	if (ret == -ENOSPC && !retried) {
2021		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
2022					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
2023					       I915_CACHE_NONE,
2024					       0, dev_priv->gtt.base.total,
2025					       0);
2026		if (ret)
2027			goto err_out;
2028
2029		retried = true;
2030		goto alloc;
2031	}
2032
2033	if (ret)
2034		goto err_out;
2035
2036
2037	if (ppgtt->node.start < dev_priv->gtt.mappable_end)
2038		DRM_DEBUG("Forced to use aperture for PDEs\n");
2039
2040	return 0;
2041
2042err_out:
2043	gen6_free_scratch(vm);
2044	return ret;
2045}
2046
2047static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
2048{
2049	return gen6_ppgtt_allocate_page_directories(ppgtt);
2050}
2051
2052static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
2053				  uint64_t start, uint64_t length)
2054{
2055	struct i915_page_table *unused;
2056	uint32_t pde, temp;
2057
2058	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
2059		ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
2060}
2061
2062static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
2063{
2064	struct drm_device *dev = ppgtt->base.dev;
2065	struct drm_i915_private *dev_priv = dev->dev_private;
2066	int ret;
2067
2068	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
2069	if (IS_GEN6(dev)) {
2070		ppgtt->switch_mm = gen6_mm_switch;
2071	} else if (IS_HASWELL(dev)) {
2072		ppgtt->switch_mm = hsw_mm_switch;
2073	} else if (IS_GEN7(dev)) {
2074		ppgtt->switch_mm = gen7_mm_switch;
2075	} else
2076		BUG();
2077
2078	if (intel_vgpu_active(dev))
2079		ppgtt->switch_mm = vgpu_mm_switch;
2080
2081	ret = gen6_ppgtt_alloc(ppgtt);
2082	if (ret)
2083		return ret;
2084
2085	ppgtt->base.allocate_va_range = gen6_alloc_va_range;
2086	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
2087	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
2088	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
2089	ppgtt->base.bind_vma = ppgtt_bind_vma;
2090	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
2091	ppgtt->base.start = 0;
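	/* With 4KiB pages this is 512 PDEs x 1024 PTEs x 4KiB, i.e. the
	 * 2GiB per-process address space of the gen6/gen7 PPGTT.
	 */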
2092	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
2093	ppgtt->debug_dump = gen6_dump_ppgtt;
2094
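	/* The page directory was carved out of the GGTT above, so the GGTT
	 * PTE index of ppgtt->node times the PTE size gives the byte offset
	 * of the first PDE inside the gsm mapping, and pd_addr below is the
	 * CPU-visible pointer to that first PDE.
	 */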
2095	ppgtt->pd.base.ggtt_offset =
2096		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
2097
2098	ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
2099		ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
2100
2101	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
2102
2103	gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
2104
2105	DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
2106			 ppgtt->node.size >> 20,
2107			 ppgtt->node.start / PAGE_SIZE);
2108
2109	DRM_DEBUG("Adding PPGTT at offset %x\n",
2110		  ppgtt->pd.base.ggtt_offset << 10);
2111
2112	return 0;
2113}
2114
2115static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
2116{
2117	ppgtt->base.dev = dev;
2118
2119	if (INTEL_INFO(dev)->gen < 8)
2120		return gen6_ppgtt_init(ppgtt);
2121	else
2122		return gen8_ppgtt_init(ppgtt);
2123}
2124
2125static void i915_address_space_init(struct i915_address_space *vm,
2126				    struct drm_i915_private *dev_priv)
2127{
2128	drm_mm_init(&vm->mm, vm->start, vm->total);
2129	vm->dev = dev_priv->dev;
2130	INIT_LIST_HEAD(&vm->active_list);
2131	INIT_LIST_HEAD(&vm->inactive_list);
2132	list_add_tail(&vm->global_link, &dev_priv->vm_list);
2133}
2134
2135static void gtt_write_workarounds(struct drm_device *dev)
2136{
2137	struct drm_i915_private *dev_priv = dev->dev_private;
2138
2139	/* This function is for GTT-related workarounds. It is called on
2140	 * driver load and after a GPU reset, so workarounds placed here are
2141	 * simply re-applied even if a GPU reset overwrites them.
2142	 */
2143	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
2144	if (IS_BROADWELL(dev))
2145		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
2146	else if (IS_CHERRYVIEW(dev))
2147		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
2148	else if (IS_SKYLAKE(dev))
2149		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
2150	else if (IS_BROXTON(dev))
2151		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2152}
2153
2154int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
2155{
2156	struct drm_i915_private *dev_priv = dev->dev_private;
2157	int ret = 0;
2158
2159	ret = __hw_ppgtt_init(dev, ppgtt);
2160	if (ret == 0) {
2161		kref_init(&ppgtt->ref);
2162		i915_address_space_init(&ppgtt->base, dev_priv);
2163	}
2164
2165	return ret;
2166}
2167
2168int i915_ppgtt_init_hw(struct drm_device *dev)
2169{
2170	gtt_write_workarounds(dev);
2171
2172	/* In the case of execlists, PPGTT is enabled by the context descriptor
2173	 * and the PDPs are contained within the context itself.  We don't
2174	 * need to do anything here. */
2175	if (i915.enable_execlists)
2176		return 0;
2177
2178	if (!USES_PPGTT(dev))
2179		return 0;
2180
2181	if (IS_GEN6(dev))
2182		gen6_ppgtt_enable(dev);
2183	else if (IS_GEN7(dev))
2184		gen7_ppgtt_enable(dev);
2185	else if (INTEL_INFO(dev)->gen >= 8)
2186		gen8_ppgtt_enable(dev);
2187	else
2188		MISSING_CASE(INTEL_INFO(dev)->gen);
2189
2190	return 0;
2191}
2192
2193int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
2194{
2195	struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
2196	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2197
2198	if (i915.enable_execlists)
2199		return 0;
2200
2201	if (!ppgtt)
2202		return 0;
2203
2204	return ppgtt->switch_mm(ppgtt, req);
2205}
2206
2207struct i915_hw_ppgtt *
2208i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
2209{
2210	struct i915_hw_ppgtt *ppgtt;
2211	int ret;
2212
2213	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2214	if (!ppgtt)
2215		return ERR_PTR(-ENOMEM);
2216
2217	ret = i915_ppgtt_init(dev, ppgtt);
2218	if (ret) {
2219		kfree(ppgtt);
2220		return ERR_PTR(ret);
2221	}
2222
2223	ppgtt->file_priv = fpriv;
2224
2225	trace_i915_ppgtt_create(&ppgtt->base);
2226
2227	return ppgtt;
2228}
2229
2230void  i915_ppgtt_release(struct kref *kref)
2231{
2232	struct i915_hw_ppgtt *ppgtt =
2233		container_of(kref, struct i915_hw_ppgtt, ref);
2234
2235	trace_i915_ppgtt_release(&ppgtt->base);
2236
2237	/* vmas should already be unbound */
2238	WARN_ON(!list_empty(&ppgtt->base.active_list));
2239	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
2240
2241	list_del(&ppgtt->base.global_link);
2242	drm_mm_takedown(&ppgtt->base.mm);
2243
2244	ppgtt->base.cleanup(&ppgtt->base);
2245	kfree(ppgtt);
2246}
2247
2248extern int intel_iommu_gfx_mapped;
2249/* Certain Gen5 chipsets require idling the GPU before
2250 * unmapping anything from the GTT when VT-d is enabled.
2251 */
2252static bool needs_idle_maps(struct drm_device *dev)
2253{
2254#ifdef CONFIG_INTEL_IOMMU
2255	/* Query intel_iommu to see if we need the workaround. Presumably that
2256	 * was loaded first.
2257	 */
2258	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
2259		return true;
2260#endif
2261	return false;
2262}
2263
2264static bool do_idling(struct drm_i915_private *dev_priv)
2265{
2266	bool ret = dev_priv->mm.interruptible;
2267
2268	if (unlikely(dev_priv->gtt.do_idle_maps)) {
2269		dev_priv->mm.interruptible = false;
2270		if (i915_gpu_idle(dev_priv->dev)) {
2271			DRM_ERROR("Couldn't idle GPU\n");
2272			/* Wait a bit, in hopes it avoids the hang */
2273			udelay(10);
2274		}
2275	}
2276
2277	return ret;
2278}
2279
2280static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
2281{
2282	if (unlikely(dev_priv->gtt.do_idle_maps))
2283		dev_priv->mm.interruptible = interruptible;
2284}
2285
2286void i915_check_and_clear_faults(struct drm_device *dev)
2287{
2288	struct drm_i915_private *dev_priv = dev->dev_private;
2289	struct intel_engine_cs *ring;
2290	int i;
2291
2292	if (INTEL_INFO(dev)->gen < 6)
2293		return;
2294
2295	for_each_ring(ring, dev_priv, i) {
2296		u32 fault_reg;
2297		fault_reg = I915_READ(RING_FAULT_REG(ring));
2298		if (fault_reg & RING_FAULT_VALID) {
2299			DRM_DEBUG_DRIVER("Unexpected fault\n"
2300					 "\tAddr: 0x%08lx\n"
2301					 "\tAddress space: %s\n"
2302					 "\tSource ID: %d\n"
2303					 "\tType: %d\n",
2304					 fault_reg & PAGE_MASK,
2305					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2306					 RING_FAULT_SRCID(fault_reg),
2307					 RING_FAULT_FAULT_TYPE(fault_reg));
2308			I915_WRITE(RING_FAULT_REG(ring),
2309				   fault_reg & ~RING_FAULT_VALID);
2310		}
2311	}
2312	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
2313}
2314
2315static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
2316{
2317	if (INTEL_INFO(dev_priv->dev)->gen < 6) {
2318		intel_gtt_chipset_flush();
2319	} else {
2320		I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2321		POSTING_READ(GFX_FLSH_CNTL_GEN6);
2322	}
2323}
2324
2325void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
2326{
2327	struct drm_i915_private *dev_priv = dev->dev_private;
2328
2329	/* Don't bother messing with faults pre GEN6 as we have little
2330	 * documentation supporting that it's a good idea.
2331	 */
2332	if (INTEL_INFO(dev)->gen < 6)
2333		return;
2334
2335	i915_check_and_clear_faults(dev);
2336
2337	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
2338				       dev_priv->gtt.base.start,
2339				       dev_priv->gtt.base.total,
2340				       true);
2341
2342	i915_ggtt_flush(dev_priv);
2343}
2344
2345int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
2346{
2347	if (!dma_map_sg(&obj->base.dev->pdev->dev,
2348			obj->pages->sgl, obj->pages->nents,
2349			PCI_DMA_BIDIRECTIONAL))
2350		return -ENOSPC;
2351
2352	return 0;
2353}
2354
2355static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2356{
2357#ifdef writeq
2358	writeq(pte, addr);
2359#else
2360	iowrite32((u32)pte, addr);
2361	iowrite32(pte >> 32, addr + 4);
2362#endif
2363}
2364
2365static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2366				     struct sg_table *st,
2367				     uint64_t start,
2368				     enum i915_cache_level level, u32 unused)
2369{
2370	struct drm_i915_private *dev_priv = vm->dev->dev_private;
2371	unsigned first_entry = start >> PAGE_SHIFT;
2372	gen8_pte_t __iomem *gtt_entries =
2373		(gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
2374	int i = 0;
2375	struct sg_page_iter sg_iter;
2376	dma_addr_t addr = 0; /* shut up gcc */
2377	int rpm_atomic_seq;
2378
2379	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2380
2381	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
2382		addr = sg_dma_address(sg_iter.sg) +
2383			(sg_iter.sg_pgoffset << PAGE_SHIFT);
2384		gen8_set_pte(&gtt_entries[i],
2385			     gen8_pte_encode(addr, level, true));
2386		i++;
2387	}
2388
2389	/*
2390	 * XXX: This serves as a posting read to make sure that the PTE has
2391	 * actually been updated. There is some concern that, even though the
2392	 * registers and PTEs are within the same BAR, they could still be subject
2393	 * to different (NUMA-like) access patterns. Therefore, even with the way we assume
2394	 * hardware should work, we must keep this posting read for paranoia.
2395	 */
2396	if (i != 0)
2397		WARN_ON(readq(&gtt_entries[i-1])
2398			!= gen8_pte_encode(addr, level, true));
2399
2400	/* This next bit makes the above posting read even more important. We
2401	 * want to flush the TLBs only after we're certain all the PTE updates
2402	 * have finished.
2403	 */
2404	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2405	POSTING_READ(GFX_FLSH_CNTL_GEN6);
2406
2407	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2408}
2409
2410struct insert_entries {
2411	struct i915_address_space *vm;
2412	struct sg_table *st;
2413	uint64_t start;
2414	enum i915_cache_level level;
2415	u32 flags;
2416};
2417
2418static int gen8_ggtt_insert_entries__cb(void *_arg)
2419{
2420	struct insert_entries *arg = _arg;
2421	gen8_ggtt_insert_entries(arg->vm, arg->st,
2422				 arg->start, arg->level, arg->flags);
2423	return 0;
2424}
2425
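/*
 * Variant of gen8_ggtt_insert_entries() that funnels the PTE writes through
 * stop_machine() so they are serialised against all CPUs. It is selected for
 * Cherryview in gen8_gmch_probe(), presumably to work around coherency issues
 * with concurrent GGTT updates on that platform.
 */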
2426static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2427					  struct sg_table *st,
2428					  uint64_t start,
2429					  enum i915_cache_level level,
2430					  u32 flags)
2431{
2432	struct insert_entries arg = { vm, st, start, level, flags };
2433	stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
2434}
2435
2436/*
2437 * Binds an object into the global gtt with the specified cache level. The object
2438 * will be accessible to the GPU via commands whose operands reference offsets
2439 * within the global GTT as well as accessible by the GPU through the GMADR
2440 * mapped BAR (dev_priv->mm.gtt->gtt).
2441 */
2442static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2443				     struct sg_table *st,
2444				     uint64_t start,
2445				     enum i915_cache_level level, u32 flags)
2446{
2447	struct drm_i915_private *dev_priv = vm->dev->dev_private;
2448	unsigned first_entry = start >> PAGE_SHIFT;
2449	gen6_pte_t __iomem *gtt_entries =
2450		(gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
2451	int i = 0;
2452	struct sg_page_iter sg_iter;
2453	dma_addr_t addr = 0;
2454	int rpm_atomic_seq;
2455
2456	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2457
2458	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
2459		addr = sg_page_iter_dma_address(&sg_iter);
2460		iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
2461		i++;
2462	}
2463
2464	/* XXX: This serves as a posting read to make sure that the PTE has
2465	 * actually been updated. There is some concern that, even though the
2466	 * registers and PTEs are within the same BAR, they could still be subject
2467	 * to different (NUMA-like) access patterns. Therefore, even with the way we assume
2468	 * hardware should work, we must keep this posting read for paranoia.
2469	 */
2470	if (i != 0) {
2471		unsigned long gtt = readl(&gtt_entries[i-1]);
2472		WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
2473	}
2474
2475	/* This next bit makes the above posting read even more important. We
2476	 * want to flush the TLBs only after we're certain all the PTE updates
2477	 * have finished.
2478	 */
2479	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2480	POSTING_READ(GFX_FLSH_CNTL_GEN6);
2481
2482	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2483}
2484
2485static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2486				  uint64_t start,
2487				  uint64_t length,
2488				  bool use_scratch)
2489{
2490	struct drm_i915_private *dev_priv = vm->dev->dev_private;
2491	unsigned first_entry = start >> PAGE_SHIFT;
2492	unsigned num_entries = length >> PAGE_SHIFT;
2493	gen8_pte_t scratch_pte, __iomem *gtt_base =
2494		(gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
2495	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
2496	int i;
2497	int rpm_atomic_seq;
2498
2499	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2500
2501	if (WARN(num_entries > max_entries,
2502		 "First entry = %d; Num entries = %d (max=%d)\n",
2503		 first_entry, num_entries, max_entries))
2504		num_entries = max_entries;
2505
2506	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
2507				      I915_CACHE_LLC,
2508				      use_scratch);
2509	for (i = 0; i < num_entries; i++)
2510		gen8_set_pte(&gtt_base[i], scratch_pte);
2511	readl(gtt_base);
2512
2513	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2514}
2515
2516static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2517				  uint64_t start,
2518				  uint64_t length,
2519				  bool use_scratch)
2520{
2521	struct drm_i915_private *dev_priv = vm->dev->dev_private;
2522	unsigned first_entry = start >> PAGE_SHIFT;
2523	unsigned num_entries = length >> PAGE_SHIFT;
2524	gen6_pte_t scratch_pte, __iomem *gtt_base =
2525		(gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
2526	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
2527	int i;
2528	int rpm_atomic_seq;
2529
2530	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2531
2532	if (WARN(num_entries > max_entries,
2533		 "First entry = %d; Num entries = %d (max=%d)\n",
2534		 first_entry, num_entries, max_entries))
2535		num_entries = max_entries;
2536
2537	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
2538				     I915_CACHE_LLC, use_scratch, 0);
2539
2540	for (i = 0; i < num_entries; i++)
2541		iowrite32(scratch_pte, &gtt_base[i]);
2542	readl(gtt_base);
2543
2544	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2545}
2546
2547static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2548				     struct sg_table *pages,
2549				     uint64_t start,
2550				     enum i915_cache_level cache_level, u32 unused)
2551{
2552	struct drm_i915_private *dev_priv = vm->dev->dev_private;
2553	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2554		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2555	int rpm_atomic_seq;
2556
2557	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2558
2559	intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
2560
2561	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2562
2563}
2564
2565static void i915_ggtt_clear_range(struct i915_address_space *vm,
2566				  uint64_t start,
2567				  uint64_t length,
2568				  bool unused)
2569{
2570	struct drm_i915_private *dev_priv = vm->dev->dev_private;
2571	unsigned first_entry = start >> PAGE_SHIFT;
2572	unsigned num_entries = length >> PAGE_SHIFT;
2573	int rpm_atomic_seq;
2574
2575	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2576
2577	intel_gtt_clear_range(first_entry, num_entries);
2578
2579	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2580}
2581
2582static int ggtt_bind_vma(struct i915_vma *vma,
2583			 enum i915_cache_level cache_level,
2584			 u32 flags)
2585{
2586	struct drm_i915_gem_object *obj = vma->obj;
2587	u32 pte_flags = 0;
2588	int ret;
2589
2590	ret = i915_get_ggtt_vma_pages(vma);
2591	if (ret)
2592		return ret;
2593
2594	/* Currently applicable only to VLV */
2595	if (obj->gt_ro)
2596		pte_flags |= PTE_READ_ONLY;
2597
2598	vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
2599				vma->node.start,
2600				cache_level, pte_flags);
2601
2602	/*
2603	 * Without aliasing PPGTT there's no difference between
2604	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2605	 * upgrade to both bound if we bind either to avoid double-binding.
2606	 */
2607	vma->bound |= GLOBAL_BIND | LOCAL_BIND;
2608
2609	return 0;
2610}
2611
2612static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2613				 enum i915_cache_level cache_level,
2614				 u32 flags)
2615{
2616	struct drm_device *dev = vma->vm->dev;
2617	struct drm_i915_private *dev_priv = dev->dev_private;
2618	struct drm_i915_gem_object *obj = vma->obj;
2619	struct sg_table *pages = obj->pages;
2620	u32 pte_flags = 0;
2621	int ret;
2622
2623	ret = i915_get_ggtt_vma_pages(vma);
2624	if (ret)
2625		return ret;
2626	pages = vma->ggtt_view.pages;
2627
2628	/* Currently applicable only to VLV */
2629	if (obj->gt_ro)
2630		pte_flags |= PTE_READ_ONLY;
2631
2632
2633	if (flags & GLOBAL_BIND) {
2634		vma->vm->insert_entries(vma->vm, pages,
2635					vma->node.start,
2636					cache_level, pte_flags);
2637	}
2638
2639	if (flags & LOCAL_BIND) {
2640		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
2641		appgtt->base.insert_entries(&appgtt->base, pages,
2642					    vma->node.start,
2643					    cache_level, pte_flags);
2644	}
2645
2646	return 0;
2647}
2648
2649static void ggtt_unbind_vma(struct i915_vma *vma)
2650{
2651	struct drm_device *dev = vma->vm->dev;
2652	struct drm_i915_private *dev_priv = dev->dev_private;
2653	struct drm_i915_gem_object *obj = vma->obj;
2654	const uint64_t size = min_t(uint64_t,
2655				    obj->base.size,
2656				    vma->node.size);
2657
2658	if (vma->bound & GLOBAL_BIND) {
2659		vma->vm->clear_range(vma->vm,
2660				     vma->node.start,
2661				     size,
2662				     true);
2663	}
2664
2665	if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
2666		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
2667
2668		appgtt->base.clear_range(&appgtt->base,
2669					 vma->node.start,
2670					 size,
2671					 true);
2672	}
2673}
2674
2675void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
2676{
2677	struct drm_device *dev = obj->base.dev;
2678	struct drm_i915_private *dev_priv = dev->dev_private;
2679	bool interruptible;
2680
2681	interruptible = do_idling(dev_priv);
2682
2683	dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
2684		     PCI_DMA_BIDIRECTIONAL);
2685
2686	undo_idling(dev_priv, interruptible);
2687}
2688
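/*
 * Installed only on !HAS_LLC platforms: GGTT nodes whose cache "colour"
 * (cache level) differs must not be immediately adjacent, so the reported
 * hole is shrunk by one guard page at each end that touches a node of a
 * different colour.
 */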
2689static void i915_gtt_color_adjust(struct drm_mm_node *node,
2690				  unsigned long color,
2691				  u64 *start,
2692				  u64 *end)
2693{
2694	if (node->color != color)
2695		*start += 4096;
2696
2697	if (!list_empty(&node->node_list)) {
2698		node = list_entry(node->node_list.next,
2699				  struct drm_mm_node,
2700				  node_list);
2701		if (node->allocated && node->color != color)
2702			*end -= 4096;
2703	}
2704}
2705
2706static int i915_gem_setup_global_gtt(struct drm_device *dev,
2707				     u64 start,
2708				     u64 mappable_end,
2709				     u64 end)
2710{
2711	/* Let GEM Manage all of the aperture.
2712	 *
2713	 * However, leave one page at the end still bound to the scratch page.
2714	 * There are a number of places where the hardware apparently prefetches
2715	 * past the end of the object, and we've seen multiple hangs with the
2716	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2717	 * aperture.  One page should be enough to keep any prefetching inside
2718	 * of the aperture.
2719	 */
2720	struct drm_i915_private *dev_priv = dev->dev_private;
2721	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
2722	struct drm_mm_node *entry;
2723	struct drm_i915_gem_object *obj;
2724	unsigned long hole_start, hole_end;
2725	int ret;
2726
2727	BUG_ON(mappable_end > end);
2728
2729	ggtt_vm->start = start;
2730
2731	/* Subtract the guard page before address space initialization to
2732	 * shrink the range used by drm_mm */
2733	ggtt_vm->total = end - start - PAGE_SIZE;
2734	i915_address_space_init(ggtt_vm, dev_priv);
2735	ggtt_vm->total += PAGE_SIZE;
2736
2737	if (intel_vgpu_active(dev)) {
2738		ret = intel_vgt_balloon(dev);
2739		if (ret)
2740			return ret;
2741	}
2742
2743	if (!HAS_LLC(dev))
2744		ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;
2745
2746	/* Mark any preallocated objects as occupied */
2747	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
2748		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
2749
2750		DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
2751			      i915_gem_obj_ggtt_offset(obj), obj->base.size);
2752
2753		WARN_ON(i915_gem_obj_ggtt_bound(obj));
2754		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
2755		if (ret) {
2756			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
2757			return ret;
2758		}
2759		vma->bound |= GLOBAL_BIND;
2760		__i915_vma_set_map_and_fenceable(vma);
2761		list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
2762	}
2763
2764	/* Clear any non-preallocated blocks */
2765	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
2766		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2767			      hole_start, hole_end);
2768		ggtt_vm->clear_range(ggtt_vm, hole_start,
2769				     hole_end - hole_start, true);
2770	}
2771
2772	/* And finally clear the reserved guard page */
2773	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
2774
2775	if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
2776		struct i915_hw_ppgtt *ppgtt;
2777
2778		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2779		if (!ppgtt)
2780			return -ENOMEM;
2781
2782		ret = __hw_ppgtt_init(dev, ppgtt);
2783		if (ret) {
2784			ppgtt->base.cleanup(&ppgtt->base);
2785			kfree(ppgtt);
2786			return ret;
2787		}
2788
2789		if (ppgtt->base.allocate_va_range)
2790			ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
2791							    ppgtt->base.total);
2792		if (ret) {
2793			ppgtt->base.cleanup(&ppgtt->base);
2794			kfree(ppgtt);
2795			return ret;
2796		}
2797
2798		ppgtt->base.clear_range(&ppgtt->base,
2799					ppgtt->base.start,
2800					ppgtt->base.total,
2801					true);
2802
2803		dev_priv->mm.aliasing_ppgtt = ppgtt;
2804		WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma);
2805		dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma;
2806	}
2807
2808	return 0;
2809}
2810
2811void i915_gem_init_global_gtt(struct drm_device *dev)
2812{
2813	struct drm_i915_private *dev_priv = dev->dev_private;
2814	u64 gtt_size, mappable_size;
2815
2816	gtt_size = dev_priv->gtt.base.total;
2817	mappable_size = dev_priv->gtt.mappable_end;
2818
2819	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
2820}
2821
2822void i915_global_gtt_cleanup(struct drm_device *dev)
2823{
2824	struct drm_i915_private *dev_priv = dev->dev_private;
2825	struct i915_address_space *vm = &dev_priv->gtt.base;
2826
2827	if (dev_priv->mm.aliasing_ppgtt) {
2828		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2829
2830		ppgtt->base.cleanup(&ppgtt->base);
2831	}
2832
2833	i915_gem_cleanup_stolen(dev);
2834
2835	if (drm_mm_initialized(&vm->mm)) {
2836		if (intel_vgpu_active(dev))
2837			intel_vgt_deballoon();
2838
2839		drm_mm_takedown(&vm->mm);
2840		list_del(&vm->global_link);
2841	}
2842
2843	vm->cleanup(vm);
2844}
2845
2846static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2847{
2848	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2849	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2850	return snb_gmch_ctl << 20;
2851}
2852
2853static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2854{
2855	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2856	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2857	if (bdw_gmch_ctl)
2858		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2859
2860#ifdef CONFIG_X86_32
2861	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
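	/* i.e. 4MiB of PTEs / 8 bytes per gen8 PTE x 4KiB pages = 2GiB */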
2862	if (bdw_gmch_ctl > 4)
2863		bdw_gmch_ctl = 4;
2864#endif
2865
2866	return bdw_gmch_ctl << 20;
2867}
2868
2869static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
2870{
2871	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2872	gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2873
2874	if (gmch_ctrl)
2875		return 1 << (20 + gmch_ctrl);
2876
2877	return 0;
2878}
2879
2880static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
2881{
2882	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2883	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
2884	return snb_gmch_ctl << 25; /* 32 MB units */
2885}
2886
2887static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
2888{
2889	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2890	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
2891	return bdw_gmch_ctl << 25; /* 32 MB units */
2892}
2893
2894static size_t chv_get_stolen_size(u16 gmch_ctrl)
2895{
2896	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2897	gmch_ctrl &= SNB_GMCH_GMS_MASK;
2898
2899	/*
2900	 * 0x0  to 0x10: 32MB increments starting at 0MB
2901	 * 0x11 to 0x16: 4MB increments starting at 8MB
2902	 * 0x17 to 0x1d: 4MB increments starting at 36MB
2903	 */
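	/* e.g. 0x01 -> 32MB, 0x11 -> 8MB, 0x13 -> 16MB, 0x17 -> 36MB */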
2904	if (gmch_ctrl < 0x11)
2905		return gmch_ctrl << 25;
2906	else if (gmch_ctrl < 0x17)
2907		return (gmch_ctrl - 0x11 + 2) << 22;
2908	else
2909		return (gmch_ctrl - 0x17 + 9) << 22;
2910}
2911
2912static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2913{
2914	gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2915	gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2916
2917	if (gen9_gmch_ctl < 0xf0)
2918		return gen9_gmch_ctl << 25; /* 32 MB units */
2919	else
2920		/* 4MB increments from 0xf0, which itself encodes 4MB */
2921		return (gen9_gmch_ctl - 0xf0 + 1) << 22;
2922}
2923
2924static int ggtt_probe_common(struct drm_device *dev,
2925			     size_t gtt_size)
2926{
2927	struct drm_i915_private *dev_priv = dev->dev_private;
2928	struct i915_page_scratch *scratch_page;
2929	phys_addr_t gtt_phys_addr;
2930
2931	/* For Modern GENs the PTEs and register space are split in the BAR */
2932	gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
2933		(pci_resource_len(dev->pdev, 0) / 2);
2934
2935	/*
2936	 * On BXT writes larger than 64 bit to the GTT pagetable range will be
2937	 * dropped. For WC mappings in general we have 64 byte burst writes
2938	 * when the WC buffer is flushed, so we can't use it, but have to
2939	 * resort to an uncached mapping. The WC issue is easily caught by the
2940	 * readback check when writing GTT PTE entries.
2941	 */
2942	if (IS_BROXTON(dev))
2943		dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
2944	else
2945		dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
2946	if (!dev_priv->gtt.gsm) {
2947		DRM_ERROR("Failed to map the gtt page table\n");
2948		return -ENOMEM;
2949	}
2950
2951	scratch_page = alloc_scratch_page(dev);
2952	if (IS_ERR(scratch_page)) {
2953		DRM_ERROR("Scratch setup failed\n");
2954		/* iounmap will also get called at remove, but meh */
2955		iounmap(dev_priv->gtt.gsm);
2956		return PTR_ERR(scratch_page);
2957	}
2958
2959	dev_priv->gtt.base.scratch_page = scratch_page;
2960
2961	return 0;
2962}
2963
2964/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
2965 * bits. When using advanced contexts each context stores its own PAT, but
2966 * writing this data shouldn't be harmful even in those cases. */
2967static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
2968{
2969	uint64_t pat;
2970
2971	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
2972	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
2973	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
2974	      GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
2975	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
2976	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
2977	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
2978	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2979
2980	if (!USES_PPGTT(dev_priv->dev))
2981		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
2982		 * so RTL will always use the value corresponding to
2983		 * pat_sel = 000".
2984		 * So let's disable cache for GGTT to avoid screen corruptions.
2985		 * MOCS still can be used though.
2986		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
2987		 * before this patch, i.e. the same uncached + snooping access
2988		 * like on gen6/7 seems to be in effect.
2989		 * - So this just fixes blitter/render access. Again it looks
2990		 * like it's not just uncached access, but uncached + snooping.
2991		 * So we can still hold onto all our assumptions wrt cpu
2992		 * clflushing on LLC machines.
2993		 */
2994		pat = GEN8_PPAT(0, GEN8_PPAT_UC);
2995
2996	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
2997	 * write would work. */
2998	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2999	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
3000}
3001
3002static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
3003{
3004	uint64_t pat;
3005
3006	/*
3007	 * Map WB on BDW to snooped on CHV.
3008	 *
3009	 * Only the snoop bit has meaning for CHV, the rest is
3010	 * ignored.
3011	 *
3012	 * The hardware will never snoop for certain types of accesses:
3013	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
3014	 * - PPGTT page tables
3015	 * - some other special cycles
3016	 *
3017	 * As with BDW, we also need to consider the following for GT accesses:
3018	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
3019	 * so RTL will always use the value corresponding to
3020	 * pat_sel = 000".
3021	 * Which means we must set the snoop bit in PAT entry 0
3022	 * in order to keep the global status page working.
3023	 */
3024	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
3025	      GEN8_PPAT(1, 0) |
3026	      GEN8_PPAT(2, 0) |
3027	      GEN8_PPAT(3, 0) |
3028	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
3029	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
3030	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
3031	      GEN8_PPAT(7, CHV_PPAT_SNOOP);
3032
3033	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
3034	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
3035}
3036
3037static int gen8_gmch_probe(struct drm_device *dev,
3038			   u64 *gtt_total,
3039			   size_t *stolen,
3040			   phys_addr_t *mappable_base,
3041			   u64 *mappable_end)
3042{
3043	struct drm_i915_private *dev_priv = dev->dev_private;
3044	u64 gtt_size;
3045	u16 snb_gmch_ctl;
3046	int ret;
3047
3048	/* TODO: We're not aware of mappable constraints on gen8 yet */
3049	*mappable_base = pci_resource_start(dev->pdev, 2);
3050	*mappable_end = pci_resource_len(dev->pdev, 2);
3051
3052	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
3053		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
3054
3055	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3056
3057	if (INTEL_INFO(dev)->gen >= 9) {
3058		*stolen = gen9_get_stolen_size(snb_gmch_ctl);
3059		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
3060	} else if (IS_CHERRYVIEW(dev)) {
3061		*stolen = chv_get_stolen_size(snb_gmch_ctl);
3062		gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
3063	} else {
3064		*stolen = gen8_get_stolen_size(snb_gmch_ctl);
3065		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
3066	}
3067
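	/* gtt_size is the size of the PTE array in bytes, so e.g. 8MiB of
	 * PTEs / 8 bytes per gen8 PTE x 4KiB pages = 4GiB of GGTT.
	 */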
3068	*gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
3069
3070	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
3071		chv_setup_private_ppat(dev_priv);
3072	else
3073		bdw_setup_private_ppat(dev_priv);
3074
3075	ret = ggtt_probe_common(dev, gtt_size);
3076
3077	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
3078	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
3079	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
3080	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
3081
3082	if (IS_CHERRYVIEW(dev_priv))
3083		dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
3084
3085	return ret;
3086}
3087
3088static int gen6_gmch_probe(struct drm_device *dev,
3089			   u64 *gtt_total,
3090			   size_t *stolen,
3091			   phys_addr_t *mappable_base,
3092			   u64 *mappable_end)
3093{
3094	struct drm_i915_private *dev_priv = dev->dev_private;
3095	unsigned int gtt_size;
3096	u16 snb_gmch_ctl;
3097	int ret;
3098
3099	*mappable_base = pci_resource_start(dev->pdev, 2);
3100	*mappable_end = pci_resource_len(dev->pdev, 2);
3101
3102	/* 64/512MB is the current min/max we actually know of, but this is just
3103	 * a coarse sanity check.
3104	 */
3105	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
3106		DRM_ERROR("Unknown GMADR size (%llx)\n",
3107			  dev_priv->gtt.mappable_end);
3108		return -ENXIO;
3109	}
3110
3111	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
3112		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
3113	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3114
3115	*stolen = gen6_get_stolen_size(snb_gmch_ctl);
3116
3117	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
3118	*gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
3119
3120	ret = ggtt_probe_common(dev, gtt_size);
3121
3122	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
3123	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
3124	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
3125	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
3126
3127	return ret;
3128}
3129
3130static void gen6_gmch_remove(struct i915_address_space *vm)
3131{
3132
3133	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
3134
3135	iounmap(gtt->gsm);
3136	free_scratch_page(vm->dev, vm->scratch_page);
3137}
3138
3139static int i915_gmch_probe(struct drm_device *dev,
3140			   u64 *gtt_total,
3141			   size_t *stolen,
3142			   phys_addr_t *mappable_base,
3143			   u64 *mappable_end)
3144{
3145	struct drm_i915_private *dev_priv = dev->dev_private;
3146	int ret;
3147
3148	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
3149	if (!ret) {
3150		DRM_ERROR("failed to set up gmch\n");
3151		return -EIO;
3152	}
3153
3154	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
3155
3156	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
3157	dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
3158	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
3159	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
3160	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
3161
3162	if (unlikely(dev_priv->gtt.do_idle_maps))
3163		DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3164
3165	return 0;
3166}
3167
3168static void i915_gmch_remove(struct i915_address_space *vm)
3169{
3170	intel_gmch_remove();
3171}
3172
3173int i915_gem_gtt_init(struct drm_device *dev)
3174{
3175	struct drm_i915_private *dev_priv = dev->dev_private;
3176	struct i915_gtt *gtt = &dev_priv->gtt;
3177	int ret;
3178
3179	if (INTEL_INFO(dev)->gen <= 5) {
3180		gtt->gtt_probe = i915_gmch_probe;
3181		gtt->base.cleanup = i915_gmch_remove;
3182	} else if (INTEL_INFO(dev)->gen < 8) {
3183		gtt->gtt_probe = gen6_gmch_probe;
3184		gtt->base.cleanup = gen6_gmch_remove;
3185		if (IS_HASWELL(dev) && dev_priv->ellc_size)
3186			gtt->base.pte_encode = iris_pte_encode;
3187		else if (IS_HASWELL(dev))
3188			gtt->base.pte_encode = hsw_pte_encode;
3189		else if (IS_VALLEYVIEW(dev))
3190			gtt->base.pte_encode = byt_pte_encode;
3191		else if (INTEL_INFO(dev)->gen >= 7)
3192			gtt->base.pte_encode = ivb_pte_encode;
3193		else
3194			gtt->base.pte_encode = snb_pte_encode;
3195	} else {
3196		dev_priv->gtt.gtt_probe = gen8_gmch_probe;
3197		dev_priv->gtt.base.cleanup = gen6_gmch_remove;
3198	}
3199
3200	gtt->base.dev = dev;
3201	gtt->base.is_ggtt = true;
3202
3203	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
3204			     &gtt->mappable_base, &gtt->mappable_end);
3205	if (ret)
3206		return ret;
3207
3208	/*
3209	 * Initialise stolen early so that we may reserve preallocated
3210	 * objects for the BIOS to KMS transition.
3211	 */
3212	ret = i915_gem_init_stolen(dev);
3213	if (ret)
3214		goto out_gtt_cleanup;
3215
3216	/* GMADR is the PCI mmio aperture into the global GTT. */
3217	DRM_INFO("Memory usable by graphics device = %lluM\n",
3218		 gtt->base.total >> 20);
3219	DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
3220	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
3221#ifdef CONFIG_INTEL_IOMMU
3222	if (intel_iommu_gfx_mapped)
3223		DRM_INFO("VT-d active for gfx access\n");
3224#endif
3225	/*
3226	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
3227	 * user's requested state against the hardware/driver capabilities.  We
3228	 * do this now so that we can print out any log messages once rather
3229	 * than every time we check intel_enable_ppgtt().
3230	 */
3231	i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
3232	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
3233
3234	return 0;
3235
3236out_gtt_cleanup:
3237	gtt->base.cleanup(&dev_priv->gtt.base);
3238
3239	return ret;
3240}
3241
3242void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3243{
3244	struct drm_i915_private *dev_priv = dev->dev_private;
3245	struct drm_i915_gem_object *obj;
3246	struct i915_address_space *vm;
3247	struct i915_vma *vma;
3248	bool flush;
3249
3250	i915_check_and_clear_faults(dev);
3251
3252	/* First fill our portion of the GTT with scratch pages */
3253	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
3254				       dev_priv->gtt.base.start,
3255				       dev_priv->gtt.base.total,
3256				       true);
3257
3258	/* Cache flush objects bound into GGTT and rebind them. */
3259	vm = &dev_priv->gtt.base;
3260	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
3261		flush = false;
3262		list_for_each_entry(vma, &obj->vma_list, obj_link) {
3263			if (vma->vm != vm)
3264				continue;
3265
3266			WARN_ON(i915_vma_bind(vma, obj->cache_level,
3267					      PIN_UPDATE));
3268
3269			flush = true;
3270		}
3271
3272		if (flush)
3273			i915_gem_clflush_object(obj, obj->pin_display);
3274	}
3275
3276	if (INTEL_INFO(dev)->gen >= 8) {
3277		if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
3278			chv_setup_private_ppat(dev_priv);
3279		else
3280			bdw_setup_private_ppat(dev_priv);
3281
3282		return;
3283	}
3284
3285	if (USES_PPGTT(dev)) {
3286		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
3287			/* TODO: Perhaps it shouldn't be gen6 specific */
3288
3289			struct i915_hw_ppgtt *ppgtt =
3290					container_of(vm, struct i915_hw_ppgtt,
3291						     base);
3292
3293			if (i915_is_ggtt(vm))
3294				ppgtt = dev_priv->mm.aliasing_ppgtt;
3295
3296			gen6_write_page_range(dev_priv, &ppgtt->pd,
3297					      0, ppgtt->base.total);
3298		}
3299	}
3300
3301	i915_ggtt_flush(dev_priv);
3302}
3303
3304static struct i915_vma *
3305__i915_gem_vma_create(struct drm_i915_gem_object *obj,
3306		      struct i915_address_space *vm,
3307		      const struct i915_ggtt_view *ggtt_view)
3308{
3309	struct i915_vma *vma;
3310
3311	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
3312		return ERR_PTR(-EINVAL);
3313
3314	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
3315	if (vma == NULL)
3316		return ERR_PTR(-ENOMEM);
3317
3318	INIT_LIST_HEAD(&vma->vm_link);
3319	INIT_LIST_HEAD(&vma->obj_link);
3320	INIT_LIST_HEAD(&vma->exec_list);
3321	vma->vm = vm;
3322	vma->obj = obj;
3323	vma->is_ggtt = i915_is_ggtt(vm);
3324
3325	if (i915_is_ggtt(vm))
3326		vma->ggtt_view = *ggtt_view;
3327	else
3328		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
3329
3330	list_add_tail(&vma->obj_link, &obj->vma_list);
3331
3332	return vma;
3333}
3334
3335struct i915_vma *
3336i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
3337				  struct i915_address_space *vm)
3338{
3339	struct i915_vma *vma;
3340
3341	vma = i915_gem_obj_to_vma(obj, vm);
3342	if (!vma)
3343		vma = __i915_gem_vma_create(obj, vm,
3344					    i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
3345
3346	return vma;
3347}
3348
3349struct i915_vma *
3350i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
3351				       const struct i915_ggtt_view *view)
3352{
3353	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
3354	struct i915_vma *vma;
3355
3356	if (WARN_ON(!view))
3357		return ERR_PTR(-EINVAL);
3358
3359	vma = i915_gem_obj_to_ggtt_view(obj, view);
3360
3361	if (IS_ERR(vma))
3362		return vma;
3363
3364	if (!vma)
3365		vma = __i915_gem_vma_create(obj, ggtt, view);
3366
3367	return vma;
3368
3369}
3370
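/*
 * Emit the source pages column by column, walking each column from the bottom
 * row upwards, which produces a 90 degree rotated layout: e.g. for a 2x2
 * surface with stride 2, source pages {0,1,2,3} come out in the order 2,0,3,1.
 */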
3371static struct scatterlist *
3372rotate_pages(const dma_addr_t *in, unsigned int offset,
3373	     unsigned int width, unsigned int height,
3374	     unsigned int stride,
3375	     struct sg_table *st, struct scatterlist *sg)
3376{
3377	unsigned int column, row;
3378	unsigned int src_idx;
3379
3380	if (!sg) {
3381		st->nents = 0;
3382		sg = st->sgl;
3383	}
3384
3385	for (column = 0; column < width; column++) {
3386		src_idx = stride * (height - 1) + column;
3387		for (row = 0; row < height; row++) {
3388			st->nents++;
3389			/* We don't need the pages, but need to initialize
3390			 * the entries so the sg list can be happily traversed.
3391			 * The only things we need are the DMA addresses.
3392			 */
3393			sg_set_page(sg, NULL, PAGE_SIZE, 0);
3394			sg_dma_address(sg) = in[offset + src_idx];
3395			sg_dma_len(sg) = PAGE_SIZE;
3396			sg = sg_next(sg);
3397			src_idx -= stride;
3398		}
3399	}
3400
3401	return sg;
3402}
3403
3404static struct sg_table *
3405intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
3406			  struct drm_i915_gem_object *obj)
3407{
3408	unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
3409	unsigned int size_pages_uv;
3410	struct sg_page_iter sg_iter;
3411	unsigned long i;
3412	dma_addr_t *page_addr_list;
3413	struct sg_table *st;
3414	unsigned int uv_start_page;
3415	struct scatterlist *sg;
3416	int ret = -ENOMEM;
3417
3418	/* Allocate a temporary list of source pages for random access. */
3419	page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
3420				       sizeof(dma_addr_t));
3421	if (!page_addr_list)
3422		return ERR_PTR(ret);
3423
3424	/* Account for UV plane with NV12. */
3425	if (rot_info->pixel_format == DRM_FORMAT_NV12)
3426		size_pages_uv = rot_info->size_uv >> PAGE_SHIFT;
3427	else
3428		size_pages_uv = 0;
3429
3430	/* Allocate target SG list. */
3431	st = kmalloc(sizeof(*st), GFP_KERNEL);
3432	if (!st)
3433		goto err_st_alloc;
3434
3435	ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL);
3436	if (ret)
3437		goto err_sg_alloc;
3438
3439	/* Populate source page list from the object. */
3440	i = 0;
3441	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
3442		page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
3443		i++;
3444	}
3445
3446	/* Rotate the pages. */
3447	sg = rotate_pages(page_addr_list, 0,
3448		     rot_info->width_pages, rot_info->height_pages,
3449		     rot_info->width_pages,
3450		     st, NULL);
3451
3452	/* Append the UV plane if NV12. */
3453	if (rot_info->pixel_format == DRM_FORMAT_NV12) {
3454		uv_start_page = size_pages;
3455
3456		/* Check for tile-row un-alignment. */
3457		if (offset_in_page(rot_info->uv_offset))
3458			uv_start_page--;
3459
3460		rot_info->uv_start_page = uv_start_page;
3461
3462		rotate_pages(page_addr_list, uv_start_page,
3463			     rot_info->width_pages_uv,
3464			     rot_info->height_pages_uv,
3465			     rot_info->width_pages_uv,
3466			     st, sg);
3467	}
3468
3469	DRM_DEBUG_KMS(
3470		      "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0)).\n",
3471		      obj->base.size, rot_info->pitch, rot_info->height,
3472		      rot_info->pixel_format, rot_info->width_pages,
3473		      rot_info->height_pages, size_pages + size_pages_uv,
3474		      size_pages);
3475
3476	drm_free_large(page_addr_list);
3477
3478	return st;
3479
3480err_sg_alloc:
3481	kfree(st);
3482err_st_alloc:
3483	drm_free_large(page_addr_list);
3484
3485	DRM_DEBUG_KMS(
3486		      "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0))\n",
3487		      obj->base.size, ret, rot_info->pitch, rot_info->height,
3488		      rot_info->pixel_format, rot_info->width_pages,
3489		      rot_info->height_pages, size_pages + size_pages_uv,
3490		      size_pages);
3491	return ERR_PTR(ret);
3492}
3493
3494static struct sg_table *
3495intel_partial_pages(const struct i915_ggtt_view *view,
3496		    struct drm_i915_gem_object *obj)
3497{
3498	struct sg_table *st;
3499	struct scatterlist *sg;
3500	struct sg_page_iter obj_sg_iter;
3501	int ret = -ENOMEM;
3502
3503	st = kmalloc(sizeof(*st), GFP_KERNEL);
3504	if (!st)
3505		goto err_st_alloc;
3506
3507	ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
3508	if (ret)
3509		goto err_sg_alloc;
3510
3511	sg = st->sgl;
3512	st->nents = 0;
3513	for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
3514		view->params.partial.offset)
3515	{
3516		if (st->nents >= view->params.partial.size)
3517			break;
3518
3519		sg_set_page(sg, NULL, PAGE_SIZE, 0);
3520		sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
3521		sg_dma_len(sg) = PAGE_SIZE;
3522
3523		sg = sg_next(sg);
3524		st->nents++;
3525	}
3526
3527	return st;
3528
3529err_sg_alloc:
3530	kfree(st);
3531err_st_alloc:
3532	return ERR_PTR(ret);
3533}
3534
3535static int
3536i915_get_ggtt_vma_pages(struct i915_vma *vma)
3537{
3538	int ret = 0;
3539
3540	if (vma->ggtt_view.pages)
3541		return 0;
3542
3543	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
3544		vma->ggtt_view.pages = vma->obj->pages;
3545	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
3546		vma->ggtt_view.pages =
3547			intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
3548	else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
3549		vma->ggtt_view.pages =
3550			intel_partial_pages(&vma->ggtt_view, vma->obj);
3551	else
3552		WARN_ONCE(1, "GGTT view %u not implemented!\n",
3553			  vma->ggtt_view.type);
3554
3555	if (!vma->ggtt_view.pages) {
3556		DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
3557			  vma->ggtt_view.type);
3558		ret = -EINVAL;
3559	} else if (IS_ERR(vma->ggtt_view.pages)) {
3560		ret = PTR_ERR(vma->ggtt_view.pages);
3561		vma->ggtt_view.pages = NULL;
3562		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3563			  vma->ggtt_view.type, ret);
3564	}
3565
3566	return ret;
3567}
3568
3569/**
3570 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
3571 * @vma: VMA to map
3572 * @cache_level: mapping cache level
3573 * @flags: flags like global or local mapping
3574 *
3575 * DMA addresses are taken from the scatter-gather table of this object (or of
3576 * this VMA in case of non-default GGTT views) and PTE entries set up.
3577 * Note that DMA addresses are also the only part of the SG table we care about.
3578 */
3579int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
3580		  u32 flags)
3581{
3582	int ret;
3583	u32 bind_flags;
3584
3585	if (WARN_ON(flags == 0))
3586		return -EINVAL;
3587
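	/* Translate the PIN_* request into GLOBAL/LOCAL bind flags and only
	 * bind what is not already bound, unless PIN_UPDATE explicitly asks
	 * for the existing bindings to be rewritten.
	 */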
3588	bind_flags = 0;
3589	if (flags & PIN_GLOBAL)
3590		bind_flags |= GLOBAL_BIND;
3591	if (flags & PIN_USER)
3592		bind_flags |= LOCAL_BIND;
3593
3594	if (flags & PIN_UPDATE)
3595		bind_flags |= vma->bound;
3596	else
3597		bind_flags &= ~vma->bound;
3598
3599	if (bind_flags == 0)
3600		return 0;
3601
3602	if (vma->bound == 0 && vma->vm->allocate_va_range) {
3603		/* XXX: i915_vma_pin() will fix this +- hack */
3604		vma->pin_count++;
3605		trace_i915_va_alloc(vma);
3606		ret = vma->vm->allocate_va_range(vma->vm,
3607						 vma->node.start,
3608						 vma->node.size);
3609		vma->pin_count--;
3610		if (ret)
3611			return ret;
3612	}
3613
3614	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
3615	if (ret)
3616		return ret;
3617
3618	vma->bound |= bind_flags;
3619
3620	return 0;
3621}
3622
3623/**
3624 * i915_ggtt_view_size - Get the size of a GGTT view.
3625 * @obj: Object the view is of.
3626 * @view: The view in question.
3627 *
3628 * @return The size of the GGTT view in bytes.
3629 */
3630size_t
3631i915_ggtt_view_size(struct drm_i915_gem_object *obj,
3632		    const struct i915_ggtt_view *view)
3633{
3634	if (view->type == I915_GGTT_VIEW_NORMAL) {
3635		return obj->base.size;
3636	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
3637		return view->params.rotated.size;
3638	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
3639		return view->params.partial.size << PAGE_SHIFT;
3640	} else {
3641		WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
3642		return obj->base.size;
3643	}
3644}
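/*
 * Worked example of the helper above: a partial view with
 * params.partial.size == 16 pages reports 16 << PAGE_SHIFT == 64KiB (with
 * 4KiB pages), while the normal and unimplemented cases simply report
 * obj->base.size.
 */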
v5.4
  25
  26#include <linux/slab.h> /* fault-inject.h is not standalone! */
  27
  28#include <linux/fault-inject.h>
  29#include <linux/log2.h>
  30#include <linux/random.h>
  31#include <linux/seq_file.h>
  32#include <linux/stop_machine.h>
  33
  34#include <asm/set_memory.h>
  35#include <asm/smp.h>
  36
  37#include <drm/i915_drm.h>
  38
  39#include "display/intel_frontbuffer.h"
  40#include "gt/intel_gt.h"
  41
  42#include "i915_drv.h"
  43#include "i915_scatterlist.h"
  44#include "i915_trace.h"
  45#include "i915_vgpu.h"
  46
  47#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
  48
  49#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
  50#define DBG(...) trace_printk(__VA_ARGS__)
  51#else
  52#define DBG(...)
  53#endif
  54
  55/**
  56 * DOC: Global GTT views
  57 *
  58 * Background and previous state
  59 *
  60 * Historically objects could exists (be bound) in global GTT space only as
  61 * singular instances with a view representing all of the object's backing pages
  62 * in a linear fashion. This view will be called a normal view.
  63 *
  64 * To support multiple views of the same object, where the number of mapped
  65 * pages is not equal to the backing store, or where the layout of the pages
  66 * is not linear, concept of a GGTT view was added.
  67 *
  68 * One example of an alternative view is a stereo display driven by a single
  69 * image. In this case we would have a framebuffer looking like this
  70 * (2x2 pages):
  71 *
  72 *    12
  73 *    34
  74 *
  75 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
  76 * rendering. In contrast, fed to the display engine would be an alternative
  77 * view which could look something like this:
  78 *
  79 *   1212
  80 *   3434
  81 *
  82 * In this example both the size and layout of pages in the alternative view is
  83 * different from the normal view.
  84 *
  85 * Implementation and usage
  86 *
  87 * GGTT views are implemented using VMAs and are distinguished via enum
  88 * i915_ggtt_view_type and struct i915_ggtt_view.
  89 *
  90 * A new flavour of core GEM functions which work with GGTT bound objects were
  91 * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
  92 * renaming  in large amounts of code. They take the struct i915_ggtt_view
  93 * parameter encapsulating all metadata required to implement a view.
  94 *
  95 * As a helper for callers which are only interested in the normal view,
  96 * globally const i915_ggtt_view_normal singleton instance exists. All old core
  97 * GEM API functions, the ones not taking the view parameter, are operating on,
  98 * or with the normal GGTT view.
  99 *
 100 * Code wanting to add or use a new GGTT view needs to:
 101 *
 102 * 1. Add a new enum with a suitable name.
 103 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 104 * 3. Add support to i915_get_vma_pages().
 105 *
 106 * New views are required to build a scatter-gather table from within the
 107 * i915_get_ggtt_vma_pages() function. This table is stored in the vma.ggtt_view
 108 * and exists for the lifetime of a VMA.
 109 *
 110 * The core API is designed to have copy semantics, which means that the passed-in
 111 * struct i915_ggtt_view does not need to be persistent (left around after
 112 * calling the core API functions).
 113 *
 114 */
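/*
 * An illustrative sketch of the three steps above; the names here
 * (I915_GGTT_VIEW_EXAMPLE, intel_example_pages) are hypothetical and not part
 * of the driver. A new view adds an enum value, extends struct i915_ggtt_view
 * if it needs extra metadata, and grows the dispatch in
 * i915_get_ggtt_vma_pages() along the lines of:
 *
 *	else if (vma->ggtt_view.type == I915_GGTT_VIEW_EXAMPLE)
 *		vma->pages = intel_example_pages(&vma->ggtt_view, vma->obj);
 *
 * where intel_example_pages() builds the scatter-gather table that then lives
 * for the lifetime of the VMA.
 */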
 115
 116#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
 117
 118static int
 119i915_get_ggtt_vma_pages(struct i915_vma *vma);
 120
 121static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
 122{
 123	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
 124
 125	/*
 126	 * Note that as an uncached mmio write, this will flush the
 127	 * WCB of the writes into the GGTT before it triggers the invalidate.
 128	 */
 129	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 130}
 131
 132static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
 133{
 134	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
 
 
 
 
 135
 136	gen6_ggtt_invalidate(ggtt);
 137	intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 138}
 
 
 139
 140static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
 141{
 142	intel_gtt_chipset_flush();
 
 143}
 144
 145static int ppgtt_bind_vma(struct i915_vma *vma,
 146			  enum i915_cache_level cache_level,
 147			  u32 unused)
 148{
 149	u32 pte_flags;
 150	int err;
 151
 152	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
 153		err = vma->vm->allocate_va_range(vma->vm,
 154						 vma->node.start, vma->size);
 155		if (err)
 156			return err;
 157	}
 158
 159	/* Applicable to VLV, and gen8+ */
 160	pte_flags = 0;
 161	if (i915_gem_object_is_readonly(vma->obj))
 162		pte_flags |= PTE_READ_ONLY;
 163
 164	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
 
 165
 166	return 0;
 167}
 168
 169static void ppgtt_unbind_vma(struct i915_vma *vma)
 170{
 171	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
 
 
 
 172}
 173
 174static int ppgtt_set_pages(struct i915_vma *vma)
 
 
 175{
 176	GEM_BUG_ON(vma->pages);
 177
 178	vma->pages = vma->obj->mm.pages;
 179
 180	vma->page_sizes = vma->obj->mm.page_sizes;
 181
 182	return 0;
 183}
 184
 185static void clear_pages(struct i915_vma *vma)
 186{
 187	GEM_BUG_ON(!vma->pages);
 188
 189	if (vma->pages != vma->obj->mm.pages) {
 190		sg_free_table(vma->pages);
 191		kfree(vma->pages);
 192	}
 193	vma->pages = NULL;
 194
 195	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
 196}
 197
 198static u64 gen8_pte_encode(dma_addr_t addr,
 199			   enum i915_cache_level level,
 200			   u32 flags)
 201{
 202	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
 203
 204	if (unlikely(flags & PTE_READ_ONLY))
 205		pte &= ~_PAGE_RW;
 206
 207	switch (level) {
 208	case I915_CACHE_NONE:
 209		pte |= PPAT_UNCACHED;
 210		break;
 211	case I915_CACHE_WT:
 212		pte |= PPAT_DISPLAY_ELLC;
 213		break;
 214	default:
 215		pte |= PPAT_CACHED;
 216		break;
 217	}
 218
 219	return pte;
 220}
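/*
 * Worked example, following directly from the encoding above: a read-only,
 * I915_CACHE_NONE mapping of a page at dma address 0x1000 yields
 * 0x1000 | _PAGE_PRESENT | PPAT_UNCACHED, i.e. present, not writable and
 * uncached in the PPAT.
 */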
 221
 222static u64 gen8_pde_encode(const dma_addr_t addr,
 223			   const enum i915_cache_level level)
 224{
 225	u64 pde = _PAGE_PRESENT | _PAGE_RW;
 226	pde |= addr;
 227	if (level != I915_CACHE_NONE)
 228		pde |= PPAT_CACHED_PDE;
 229	else
 230		pde |= PPAT_UNCACHED;
 231	return pde;
 232}
 233
 234static u64 snb_pte_encode(dma_addr_t addr,
 235			  enum i915_cache_level level,
 236			  u32 flags)
 
 
 
 237{
 238	gen6_pte_t pte = GEN6_PTE_VALID;
 239	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 240
 241	switch (level) {
 242	case I915_CACHE_L3_LLC:
 243	case I915_CACHE_LLC:
 244		pte |= GEN6_PTE_CACHE_LLC;
 245		break;
 246	case I915_CACHE_NONE:
 247		pte |= GEN6_PTE_UNCACHED;
 248		break;
 249	default:
 250		MISSING_CASE(level);
 251	}
 252
 253	return pte;
 254}
 255
 256static u64 ivb_pte_encode(dma_addr_t addr,
 257			  enum i915_cache_level level,
 258			  u32 flags)
 259{
 260	gen6_pte_t pte = GEN6_PTE_VALID;
 261	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 262
 263	switch (level) {
 264	case I915_CACHE_L3_LLC:
 265		pte |= GEN7_PTE_CACHE_L3_LLC;
 266		break;
 267	case I915_CACHE_LLC:
 268		pte |= GEN6_PTE_CACHE_LLC;
 269		break;
 270	case I915_CACHE_NONE:
 271		pte |= GEN6_PTE_UNCACHED;
 272		break;
 273	default:
 274		MISSING_CASE(level);
 275	}
 276
 277	return pte;
 278}
 279
 280static u64 byt_pte_encode(dma_addr_t addr,
 281			  enum i915_cache_level level,
 282			  u32 flags)
 283{
 284	gen6_pte_t pte = GEN6_PTE_VALID;
 285	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 286
 287	if (!(flags & PTE_READ_ONLY))
 288		pte |= BYT_PTE_WRITEABLE;
 289
 290	if (level != I915_CACHE_NONE)
 291		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
 292
 293	return pte;
 294}
 295
 296static u64 hsw_pte_encode(dma_addr_t addr,
 297			  enum i915_cache_level level,
 298			  u32 flags)
 299{
 300	gen6_pte_t pte = GEN6_PTE_VALID;
 301	pte |= HSW_PTE_ADDR_ENCODE(addr);
 302
 303	if (level != I915_CACHE_NONE)
 304		pte |= HSW_WB_LLC_AGE3;
 305
 306	return pte;
 307}
 308
 309static u64 iris_pte_encode(dma_addr_t addr,
 310			   enum i915_cache_level level,
 311			   u32 flags)
 312{
 313	gen6_pte_t pte = GEN6_PTE_VALID;
 314	pte |= HSW_PTE_ADDR_ENCODE(addr);
 315
 316	switch (level) {
 317	case I915_CACHE_NONE:
 318		break;
 319	case I915_CACHE_WT:
 320		pte |= HSW_WT_ELLC_LLC_AGE3;
 321		break;
 322	default:
 323		pte |= HSW_WB_ELLC_LLC_AGE3;
 324		break;
 325	}
 326
 327	return pte;
 328}
 329
 330static void stash_init(struct pagestash *stash)
 
 331{
 332	pagevec_init(&stash->pvec);
 333	spin_lock_init(&stash->lock);
 334}
 335
 336static struct page *stash_pop_page(struct pagestash *stash)
 337{
 338	struct page *page = NULL;
 
 339
 340	spin_lock(&stash->lock);
 341	if (likely(stash->pvec.nr))
 342		page = stash->pvec.pages[--stash->pvec.nr];
 343	spin_unlock(&stash->lock);
 344
 345	return page;
 
 
 346}
 347
 348static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
 349{
 350	unsigned int nr;
 
 351
 352	spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
 353
 354	nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
 355	memcpy(stash->pvec.pages + stash->pvec.nr,
 356	       pvec->pages + pvec->nr - nr,
 357	       sizeof(pvec->pages[0]) * nr);
 358	stash->pvec.nr += nr;
 359
 360	spin_unlock(&stash->lock);
 
 361
 362	pvec->nr -= nr;
 363}
 
 
 364
 365static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 
 366{
 367	struct pagevec stack;
 368	struct page *page;
 369
 370	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
 371		i915_gem_shrink_all(vm->i915);
 372
 373	page = stash_pop_page(&vm->free_pages);
 374	if (page)
 375		return page;
 376
 377	if (!vm->pt_kmap_wc)
 378		return alloc_page(gfp);
 
 
 379
 380	/* Look in our global stash of WC pages... */
 381	page = stash_pop_page(&vm->i915->mm.wc_stash);
 382	if (page)
 383		return page;
 384
 385	/*
 386	 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
 387	 *
 388	 * We have to be careful as page allocation may trigger the shrinker
 389	 * (via direct reclaim) which will fill up the WC stash underneath us.
 390	 * So we add our WB pages into a temporary pvec on the stack and merge
 391	 * them into the WC stash after all the allocations are complete.
 392	 */
 393	pagevec_init(&stack);
 394	do {
 395		struct page *page;
 396
 397		page = alloc_page(gfp);
 398		if (unlikely(!page))
 399			break;
 
 400
 401		stack.pages[stack.nr++] = page;
 402	} while (pagevec_space(&stack));
 
 403
 404	if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
 405		page = stack.pages[--stack.nr];
 
 
 
 406
 407		/* Merge spare WC pages to the global stash */
 408		if (stack.nr)
 409			stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
 410
 411		/* Push any surplus WC pages onto the local VM stash */
 412		if (stack.nr)
 413			stash_push_pagevec(&vm->free_pages, &stack);
 414	}
 415
 416	/* Return unwanted leftovers */
 417	if (unlikely(stack.nr)) {
 418		WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
 419		__pagevec_release(&stack);
 420	}
 421
 422	return page;
 
 423}
 424
 425static void vm_free_pages_release(struct i915_address_space *vm,
 426				  bool immediate)
 427{
 428	struct pagevec *pvec = &vm->free_pages.pvec;
 429	struct pagevec stack;
 
 
 430
 431	lockdep_assert_held(&vm->free_pages.lock);
 432	GEM_BUG_ON(!pagevec_count(pvec));
 
 433
 434	if (vm->pt_kmap_wc) {
 435		/*
 436		 * When we use WC, first fill up the global stash and then
 437		 * only if full immediately free the overflow.
 438		 */
 439		stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
 440
 441		/*
 442		 * As we have made some room in the VM's free_pages,
 443		 * we can wait for it to fill again. Unless we are
 444		 * inside i915_address_space_fini() and must
 445		 * immediately release the pages!
 446		 */
 447		if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
 448			return;
 449
 450		/*
 451		 * We have to drop the lock to allow ourselves to sleep,
 452		 * so take a copy of the pvec and clear the stash for
 453		 * others to use it as we sleep.
 454		 */
 455		stack = *pvec;
 456		pagevec_reinit(pvec);
 457		spin_unlock(&vm->free_pages.lock);
 458
 459		pvec = &stack;
 460		set_pages_array_wb(pvec->pages, pvec->nr);
 461
 462		spin_lock(&vm->free_pages.lock);
 463	}
 
 
 464
 465	__pagevec_release(pvec);
 466}
 467
 468static void vm_free_page(struct i915_address_space *vm, struct page *page)
 469{
 470	/*
 471	 * On !llc, we need to change the pages back to WB. We only do so
 472	 * in bulk, so we rarely need to change the page attributes here,
 473	 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
 474	 * To make detection of the possible sleep more likely, use an
 475	 * unconditional might_sleep() for everybody.
 476	 */
 477	might_sleep();
 478	spin_lock(&vm->free_pages.lock);
 479	while (!pagevec_space(&vm->free_pages.pvec))
 480		vm_free_pages_release(vm, false);
 481	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
 482	pagevec_add(&vm->free_pages.pvec, page);
 483	spin_unlock(&vm->free_pages.lock);
 484}
 485
 486static void i915_address_space_fini(struct i915_address_space *vm)
 
 487{
 488	spin_lock(&vm->free_pages.lock);
 489	if (pagevec_count(&vm->free_pages.pvec))
 490		vm_free_pages_release(vm, true);
 491	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
 492	spin_unlock(&vm->free_pages.lock);
 493
 494	drm_mm_takedown(&vm->mm);
 
 495
 496	mutex_destroy(&vm->mutex);
 497}
 498
 499static void ppgtt_destroy_vma(struct i915_address_space *vm)
 
 500{
 501	struct list_head *phases[] = {
 502		&vm->bound_list,
 503		&vm->unbound_list,
 504		NULL,
 505	}, **phase;
 506
 507	mutex_lock(&vm->i915->drm.struct_mutex);
 508	for (phase = phases; *phase; phase++) {
 509		struct i915_vma *vma, *vn;
 510
 511		list_for_each_entry_safe(vma, vn, *phase, vm_link)
 512			i915_vma_destroy(vma);
 513	}
 514	mutex_unlock(&vm->i915->drm.struct_mutex);
 515}
 516
 517static void __i915_vm_release(struct work_struct *work)
 518{
 519	struct i915_address_space *vm =
 520		container_of(work, struct i915_address_space, rcu.work);
 521
 522	ppgtt_destroy_vma(vm);
 
 
 523
 524	GEM_BUG_ON(!list_empty(&vm->bound_list));
 525	GEM_BUG_ON(!list_empty(&vm->unbound_list));
 
 
 
 
 
 
 526
 527	vm->cleanup(vm);
 528	i915_address_space_fini(vm);
 
 
 
 
 529
 530	kfree(vm);
 531}
 532
 533void i915_vm_release(struct kref *kref)
 534{
 535	struct i915_address_space *vm =
 536		container_of(kref, struct i915_address_space, ref);
 537
 538	GEM_BUG_ON(i915_is_ggtt(vm));
 539	trace_i915_ppgtt_release(vm);
 540
 541	vm->closed = true;
 542	queue_rcu_work(vm->i915->wq, &vm->rcu);
 543}
 544
 545static void i915_address_space_init(struct i915_address_space *vm, int subclass)
 
 546{
 547	kref_init(&vm->ref);
 548	INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
 549
 550	/*
 551	 * The vm->mutex must be reclaim safe (for use in the shrinker).
 552	 * Do a dummy acquire now under fs_reclaim so that any allocation
 553	 * attempt holding the lock is immediately reported by lockdep.
 554	 */
 555	mutex_init(&vm->mutex);
 556	lockdep_set_subclass(&vm->mutex, subclass);
 557	i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
 558
 559	GEM_BUG_ON(!vm->total);
 560	drm_mm_init(&vm->mm, 0, vm->total);
 561	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
 562
 563	stash_init(&vm->free_pages);
 564
 565	INIT_LIST_HEAD(&vm->unbound_list);
 566	INIT_LIST_HEAD(&vm->bound_list);
 567}
 568
 569static int __setup_page_dma(struct i915_address_space *vm,
 570			    struct i915_page_dma *p,
 571			    gfp_t gfp)
 572{
 573	p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
 574	if (unlikely(!p->page))
 
 
 
 
 575		return -ENOMEM;
 576
 577	p->daddr = dma_map_page_attrs(vm->dma,
 578				      p->page, 0, PAGE_SIZE,
 579				      PCI_DMA_BIDIRECTIONAL,
 580				      DMA_ATTR_SKIP_CPU_SYNC |
 581				      DMA_ATTR_NO_WARN);
 582	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
 583		vm_free_page(vm, p->page);
 584		return -ENOMEM;
 585	}
 586
 587	return 0;
 588}
 589
 590static int setup_page_dma(struct i915_address_space *vm,
 591			  struct i915_page_dma *p)
 592{
 593	return __setup_page_dma(vm, p, __GFP_HIGHMEM);
 
 
 594}
 595
 596static void cleanup_page_dma(struct i915_address_space *vm,
 597			     struct i915_page_dma *p)
 598{
 599	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 600	vm_free_page(vm, p->page);
 601}
 602
 603#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
 604
 605static void
 606fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
 607{
 608	kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
 
 
 
 
 609}
 610
 611#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
 612#define fill32_px(px, v) do {						\
 613	u64 v__ = lower_32_bits(v);					\
 614	fill_px((px), v__ << 32 | v__);					\
 615} while (0)
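/*
 * Example of the 32-bit helper above: fill32_px(pt, 0xdeadbeef) replicates
 * the value into both halves of each qword and writes 0xdeadbeefdeadbeef
 * into all PAGE_SIZE / sizeof(u64) == 512 entries of the page.
 */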
 
 
 
 
 
 616
 617static int
 618setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 
 
 
 619{
 620	unsigned long size;
 621
 622	/*
 623	 * In order to utilize 64K pages for an object with a size < 2M, we will
 624	 * need to support a 64K scratch page, given that every 16th entry for a
 625	 * page-table operating in 64K mode must point to a properly aligned 64K
 626	 * region, including any PTEs which happen to point to scratch.
 627	 *
 628	 * This is only relevant for the 48b PPGTT where we support
 629	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
 630	 * scratch (read-only) between all vm, we create one 64k scratch page
 631	 * for all.
 632	 */
 633	size = I915_GTT_PAGE_SIZE_4K;
 634	if (i915_vm_is_4lvl(vm) &&
 635	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
 636		size = I915_GTT_PAGE_SIZE_64K;
 637		gfp |= __GFP_NOWARN;
 638	}
 639	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
 640
 641	do {
 642		unsigned int order = get_order(size);
 643		struct page *page;
 644		dma_addr_t addr;
 645
 646		page = alloc_pages(gfp, order);
 647		if (unlikely(!page))
 648			goto skip;
 649
 650		addr = dma_map_page_attrs(vm->dma,
 651					  page, 0, size,
 652					  PCI_DMA_BIDIRECTIONAL,
 653					  DMA_ATTR_SKIP_CPU_SYNC |
 654					  DMA_ATTR_NO_WARN);
 655		if (unlikely(dma_mapping_error(vm->dma, addr)))
 656			goto free_page;
 657
 658		if (unlikely(!IS_ALIGNED(addr, size)))
 659			goto unmap_page;
 660
 661		vm->scratch[0].base.page = page;
 662		vm->scratch[0].base.daddr = addr;
 663		vm->scratch_order = order;
 664		return 0;
 665
 666unmap_page:
 667		dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
 668free_page:
 669		__free_pages(page, order);
 670skip:
 671		if (size == I915_GTT_PAGE_SIZE_4K)
 672			return -ENOMEM;
 673
 674		size = I915_GTT_PAGE_SIZE_4K;
 675		gfp &= ~__GFP_NOWARN;
 676	} while (1);
 677}
 678
 679static void cleanup_scratch_page(struct i915_address_space *vm)
 
 
 
 
 680{
 681	struct i915_page_dma *p = px_base(&vm->scratch[0]);
 682	unsigned int order = vm->scratch_order;
 683
 684	dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
 685		       PCI_DMA_BIDIRECTIONAL);
 686	__free_pages(p->page, order);
 687}
 688
 689static void free_scratch(struct i915_address_space *vm)
 
 
 
 690{
 691	int i;
 
 
 
 692
 693	if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
 694		return;
 
 695
 696	for (i = 1; i <= vm->top; i++) {
 697		if (!px_dma(&vm->scratch[i]))
 698			break;
 699		cleanup_page_dma(vm, px_base(&vm->scratch[i]));
 700	}
 
 
 701
 702	cleanup_scratch_page(vm);
 703}
 704
 705static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
 
 706{
 707	struct i915_page_table *pt;
 708
 709	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
 710	if (unlikely(!pt))
 711		return ERR_PTR(-ENOMEM);
 712
 713	if (unlikely(setup_page_dma(vm, &pt->base))) {
 714		kfree(pt);
 715		return ERR_PTR(-ENOMEM);
 716	}
 717
 718	atomic_set(&pt->used, 0);
 719	return pt;
 
 
 
 
 
 720}
 721
 722static struct i915_page_directory *__alloc_pd(size_t sz)
 
 
 
 
 723{
 724	struct i915_page_directory *pd;
 725
 726	pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
 727	if (unlikely(!pd))
 728		return NULL;
 729
 730	spin_lock_init(&pd->lock);
 731	return pd;
 
 
 
 
 
 732}
 733
 734static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
 
 
 
 735{
 736	struct i915_page_directory *pd;
 737
 738	pd = __alloc_pd(sizeof(*pd));
 739	if (unlikely(!pd))
 740		return ERR_PTR(-ENOMEM);
 
 
 
 741
 742	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
 743		kfree(pd);
 744		return ERR_PTR(-ENOMEM);
 745	}
 746
 747	return pd;
 
 748}
 749
 750static void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
 
 
 
 
 751{
 752	cleanup_page_dma(vm, pd);
 753	kfree(pd);
 754}
 755
 756#define free_px(vm, px) free_pd(vm, px_base(px))
 757
 758static inline void
 759write_dma_entry(struct i915_page_dma * const pdma,
 760		const unsigned short idx,
 761		const u64 encoded_entry)
 762{
 763	u64 * const vaddr = kmap_atomic(pdma->page);
 
 764
 765	vaddr[idx] = encoded_entry;
 766	kunmap_atomic(vaddr);
 
 
 
 767}
 768
 769static inline void
 770__set_pd_entry(struct i915_page_directory * const pd,
 771	       const unsigned short idx,
 772	       struct i915_page_dma * const to,
 773	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
 774{
 775	/* Each thread pre-pins the pd, and we may have a thread per pde. */
 776	GEM_BUG_ON(atomic_read(px_used(pd)) > 2 * ARRAY_SIZE(pd->entry));
 777
 778	atomic_inc(px_used(pd));
 779	pd->entry[idx] = to;
 780	write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
 781}
 782
 783#define set_pd_entry(pd, idx, to) \
 784	__set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)
 785
 786static inline void
 787clear_pd_entry(struct i915_page_directory * const pd,
 788	       const unsigned short idx,
 789	       const struct i915_page_scratch * const scratch)
 790{
 791	GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
 792
 793	write_dma_entry(px_base(pd), idx, scratch->encode);
 794	pd->entry[idx] = NULL;
 795	atomic_dec(px_used(pd));
 796}
 797
 798static bool
 799release_pd_entry(struct i915_page_directory * const pd,
 800		 const unsigned short idx,
 801		 struct i915_page_table * const pt,
 802		 const struct i915_page_scratch * const scratch)
 803{
 804	bool free = false;
 805
 806	if (atomic_add_unless(&pt->used, -1, 1))
 807		return false;
 
 
 
 
 808
 809	spin_lock(&pd->lock);
 810	if (atomic_dec_and_test(&pt->used)) {
 811		clear_pd_entry(pd, idx, scratch);
 812		free = true;
 
 
 
 
 813	}
 814	spin_unlock(&pd->lock);
 815
 816	return free;
 817}
 
 
 818
 819/*
 820 * PDE TLBs are a pain to invalidate on GEN8+. When we modify
 821 * the page table structures, we mark them dirty so that
 822 * context switching/execlist queuing code takes extra steps
 823 * to ensure that the TLBs are flushed.
 824 */
 825static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt)
 826{
 827	ppgtt->pd_dirty_engines = ALL_ENGINES;
 828}
 829
 830static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
 831{
 832	struct drm_i915_private *dev_priv = ppgtt->vm.i915;
 833	enum vgt_g2v_type msg;
 
 
 834	int i;
 835
 836	if (create)
 837		atomic_inc(px_used(ppgtt->pd)); /* never remove */
 838	else
 839		atomic_dec(px_used(ppgtt->pd));
 840
 841	mutex_lock(&dev_priv->vgpu.lock);
 842
 843	if (i915_vm_is_4lvl(&ppgtt->vm)) {
 844		const u64 daddr = px_dma(ppgtt->pd);
 845
 846		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
 847		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
 848
 849		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
 850				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
 851	} else {
 852		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
 853			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
 854
 855			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
 856			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
 857		}
 858
 859		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
 860				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
 861	}
 862
 863	/* g2v_notify atomically (via hv trap) consumes the message packet. */
 864	I915_WRITE(vgtif_reg(g2v_notify), msg);
 865
 866	mutex_unlock(&dev_priv->vgpu.lock);
 867}
 868
 869/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
 870#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
 871#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
 872#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
 873#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
 874#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
 875#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
 876#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
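/*
 * With 4KiB pages and 8-byte entries, GEN8_PDES == 512 and each level
 * contributes 9 bits of index: __gen8_pte_shift(0) == 12, (1) == 21,
 * (2) == 30 and (3) == 39, so a 4-level (48b) address decomposes as
 * [pml4e:9][pdpe:9][pde:9][pte:9][offset:12].
 */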
 877
 878static inline unsigned int
 879gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
 880{
 881	const int shift = gen8_pd_shift(lvl);
 882	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
 883
 884	GEM_BUG_ON(start >= end);
 885	end += ~mask >> gen8_pd_shift(1);
 886
 887	*idx = i915_pde_index(start, shift);
 888	if ((start ^ end) & mask)
 889		return GEN8_PDES - *idx;
 890	else
 891		return i915_pde_index(end, shift) - *idx;
 892}
 893
 894static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
 895{
 896	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
 897
 898	GEM_BUG_ON(start >= end);
 899	return (start ^ end) & mask && (start & ~mask) == 0;
 
 
 
 900}
 901
 902static inline unsigned int gen8_pt_count(u64 start, u64 end)
 
 903{
 904	GEM_BUG_ON(start >= end);
 905	if ((start ^ end) >> gen8_pd_shift(1))
 906		return GEN8_PDES - (start & (GEN8_PDES - 1));
 907	else
 908		return end - start;
 909}
 910
 911static inline unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
 912{
 913	unsigned int shift = __gen8_pte_shift(vm->top);
 914	return (vm->total + (1ull << shift) - 1) >> shift;
 915}
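/*
 * Worked example: a 4-level VM (vm->top == 3, vm->total == 1ull << 48) has a
 * top shift of 39, so gen8_pd_top_count() == 512; a 3-level VM (vm->top == 2,
 * 4GiB total) has a top shift of 30 and a count of 4, matching
 * GEN8_3LVL_PDPES.
 */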
 916
 917static inline struct i915_page_directory *
 918gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
 919{
 920	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
 921
 922	if (vm->top == 2)
 923		return ppgtt->pd;
 924	else
 925		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
 926}
 927
 928static inline struct i915_page_directory *
 929gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
 930{
 931	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
 932}
 933
 934static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
 935				 struct i915_page_directory *pd,
 936				 int count, int lvl)
 937{
 938	if (lvl) {
 939		void **pde = pd->entry;
 940
 941		do {
 942			if (!*pde)
 943				continue;
 944
 945			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
 946		} while (pde++, --count);
 947	}
 948
 949	free_px(vm, pd);
 950}
 951
 952static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 953{
 954	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 
 955
 956	if (intel_vgpu_active(vm->i915))
 957		gen8_ppgtt_notify_vgt(ppgtt, false);
 958
 959	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
 960	free_scratch(vm);
 
 
 
 
 961}
 962
 963static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
 964			      struct i915_page_directory * const pd,
 965			      u64 start, const u64 end, int lvl)
 966{
 967	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
 968	unsigned int idx, len;
 969
 970	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
 971
 972	len = gen8_pd_range(start, end, lvl--, &idx);
 973	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
 974	    __func__, vm, lvl + 1, start, end,
 975	    idx, len, atomic_read(px_used(pd)));
 976	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
 977
 978	do {
 979		struct i915_page_table *pt = pd->entry[idx];
 980
 981		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
 982		    gen8_pd_contains(start, end, lvl)) {
 983			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
 984			    __func__, vm, lvl + 1, idx, start, end);
 985			clear_pd_entry(pd, idx, scratch);
 986			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
 987			start += (u64)I915_PDES << gen8_pd_shift(lvl);
 988			continue;
 989		}
 990
 991		if (lvl) {
 992			start = __gen8_ppgtt_clear(vm, as_pd(pt),
 993						   start, end, lvl);
 994		} else {
 995			unsigned int count;
 996			u64 *vaddr;
 
 
 
 997
 998			count = gen8_pt_count(start, end);
 999			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
1000			    __func__, vm, lvl, start, end,
1001			    gen8_pd_index(start, 0), count,
1002			    atomic_read(&pt->used));
1003			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
1004
1005			vaddr = kmap_atomic_px(pt);
1006			memset64(vaddr + gen8_pd_index(start, 0),
1007				 vm->scratch[0].encode,
1008				 count);
1009			kunmap_atomic(vaddr);
1010
1011			atomic_sub(count, &pt->used);
1012			start += count;
1013		}
1014
1015		if (release_pd_entry(pd, idx, pt, scratch))
1016			free_px(vm, pt);
1017	} while (idx++, --len);
1018
1019	return start;
1020}
1021
1022static void gen8_ppgtt_clear(struct i915_address_space *vm,
1023			     u64 start, u64 length)
1024{
1025	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
1026	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
1027	GEM_BUG_ON(range_overflows(start, length, vm->total));
1028
1029	start >>= GEN8_PTE_SHIFT;
1030	length >>= GEN8_PTE_SHIFT;
1031	GEM_BUG_ON(length == 0);
1032
1033	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
1034			   start, start + length, vm->top);
1035}
1036
1037static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
1038			      struct i915_page_directory * const pd,
1039			      u64 * const start, const u64 end, int lvl)
1040{
1041	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
1042	struct i915_page_table *alloc = NULL;
1043	unsigned int idx, len;
1044	int ret = 0;
1045
1046	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
1047
1048	len = gen8_pd_range(*start, end, lvl--, &idx);
1049	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
1050	    __func__, vm, lvl + 1, *start, end,
1051	    idx, len, atomic_read(px_used(pd)));
1052	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
1053
1054	spin_lock(&pd->lock);
1055	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
1056	do {
1057		struct i915_page_table *pt = pd->entry[idx];
1058
1059		if (!pt) {
1060			spin_unlock(&pd->lock);
1061
1062			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
1063			    __func__, vm, lvl + 1, idx);
1064
1065			pt = fetch_and_zero(&alloc);
1066			if (lvl) {
1067				if (!pt) {
1068					pt = &alloc_pd(vm)->pt;
1069					if (IS_ERR(pt)) {
1070						ret = PTR_ERR(pt);
1071						goto out;
1072					}
1073				}
1074
1075				fill_px(pt, vm->scratch[lvl].encode);
1076			} else {
1077				if (!pt) {
1078					pt = alloc_pt(vm);
1079					if (IS_ERR(pt)) {
1080						ret = PTR_ERR(pt);
1081						goto out;
1082					}
1083				}
1084
1085				if (intel_vgpu_active(vm->i915) ||
1086				    gen8_pt_count(*start, end) < I915_PDES)
1087					fill_px(pt, vm->scratch[lvl].encode);
1088			}
1089
1090			spin_lock(&pd->lock);
1091			if (likely(!pd->entry[idx]))
1092				set_pd_entry(pd, idx, pt);
1093			else
1094				alloc = pt, pt = pd->entry[idx];
1095		}
1096
1097		if (lvl) {
1098			atomic_inc(&pt->used);
1099			spin_unlock(&pd->lock);
1100
1101			ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
1102						 start, end, lvl);
1103			if (unlikely(ret)) {
1104				if (release_pd_entry(pd, idx, pt, scratch))
1105					free_px(vm, pt);
1106				goto out;
1107			}
1108
1109			spin_lock(&pd->lock);
1110			atomic_dec(&pt->used);
1111			GEM_BUG_ON(!atomic_read(&pt->used));
1112		} else {
1113			unsigned int count = gen8_pt_count(*start, end);
1114
1115			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
1116			    __func__, vm, lvl, *start, end,
1117			    gen8_pd_index(*start, 0), count,
1118			    atomic_read(&pt->used));
1119
1120			atomic_add(count, &pt->used);
1121			/* All other pdes may be simultaneously removed */
1122			GEM_BUG_ON(atomic_read(&pt->used) > 2 * I915_PDES);
1123			*start += count;
1124		}
1125	} while (idx++, --len);
1126	spin_unlock(&pd->lock);
1127out:
1128	if (alloc)
1129		free_px(vm, alloc);
1130	return ret;
1131}
1132
1133static int gen8_ppgtt_alloc(struct i915_address_space *vm,
1134			    u64 start, u64 length)
 
 
 
 
 
1135{
1136	u64 from;
1137	int err;
 
 
 
 
1138
1139	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
1140	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
1141	GEM_BUG_ON(range_overflows(start, length, vm->total));
 
1142
1143	start >>= GEN8_PTE_SHIFT;
1144	length >>= GEN8_PTE_SHIFT;
1145	GEM_BUG_ON(length == 0);
1146	from = start;
1147
1148	err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
1149				 &start, start + length, vm->top);
1150	if (unlikely(err && from != start))
1151		__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
1152				   from, start, vm->top);
1153
1154	return err;
 
 
1155}
1156
1157static inline struct sgt_dma {
1158	struct scatterlist *sg;
1159	dma_addr_t dma, max;
1160} sgt_dma(struct i915_vma *vma) {
1161	struct scatterlist *sg = vma->pages->sgl;
1162	dma_addr_t addr = sg_dma_address(sg);
1163	return (struct sgt_dma) { sg, addr, addr + sg->length };
 
1164}
1165
1166static __always_inline u64
1167gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
1168		      struct i915_page_directory *pdp,
1169		      struct sgt_dma *iter,
1170		      u64 idx,
1171		      enum i915_cache_level cache_level,
1172		      u32 flags)
1173{
 
 
 
 
1174	struct i915_page_directory *pd;
1175	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
1176	gen8_pte_t *vaddr;
 
 
 
1177
1178	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
1179	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
1180	do {
1181		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
1182
1183		iter->dma += I915_GTT_PAGE_SIZE;
1184		if (iter->dma >= iter->max) {
1185			iter->sg = __sg_next(iter->sg);
1186			if (!iter->sg) {
1187				idx = 0;
1188				break;
1189			}
1190
1191			iter->dma = sg_dma_address(iter->sg);
1192			iter->max = iter->dma + iter->sg->length;
1193		}
1194
1195		if (gen8_pd_index(++idx, 0) == 0) {
1196			if (gen8_pd_index(idx, 1) == 0) {
1197				/* Limited by sg length for 3lvl */
1198				if (gen8_pd_index(idx, 2) == 0)
1199					break;
1200
1201				pd = pdp->entry[gen8_pd_index(idx, 2)];
1202			}
 
 
 
 
 
1203
1204			kunmap_atomic(vaddr);
1205			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
1206		}
1207	} while (1);
1208	kunmap_atomic(vaddr);
 
 
1209
1210	return idx;
1211}
1212
1213static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
1214				   struct sgt_dma *iter,
1215				   enum i915_cache_level cache_level,
1216				   u32 flags)
1217{
1218	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
1219	u64 start = vma->node.start;
1220	dma_addr_t rem = iter->sg->length;
1221
1222	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
1223
1224	do {
1225		struct i915_page_directory * const pdp =
1226			gen8_pdp_for_page_address(vma->vm, start);
1227		struct i915_page_directory * const pd =
1228			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
1229		gen8_pte_t encode = pte_encode;
1230		unsigned int maybe_64K = -1;
1231		unsigned int page_size;
1232		gen8_pte_t *vaddr;
1233		u16 index;
1234
1235		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
1236		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
1237		    rem >= I915_GTT_PAGE_SIZE_2M &&
1238		    !__gen8_pte_index(start, 0)) {
1239			index = __gen8_pte_index(start, 1);
1240			encode |= GEN8_PDE_PS_2M;
1241			page_size = I915_GTT_PAGE_SIZE_2M;
1242
1243			vaddr = kmap_atomic_px(pd);
1244		} else {
1245			struct i915_page_table *pt =
1246				i915_pt_entry(pd, __gen8_pte_index(start, 1));
1247
1248			index = __gen8_pte_index(start, 0);
1249			page_size = I915_GTT_PAGE_SIZE;
1250
1251			if (!index &&
1252			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
1253			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1254			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1255			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
1256				maybe_64K = __gen8_pte_index(start, 1);
1257
1258			vaddr = kmap_atomic_px(pt);
1259		}
1260
1261		do {
1262			GEM_BUG_ON(iter->sg->length < page_size);
1263			vaddr[index++] = encode | iter->dma;
1264
1265			start += page_size;
1266			iter->dma += page_size;
1267			rem -= page_size;
1268			if (iter->dma >= iter->max) {
1269				iter->sg = __sg_next(iter->sg);
1270				if (!iter->sg)
1271					break;
1272
1273				rem = iter->sg->length;
1274				iter->dma = sg_dma_address(iter->sg);
1275				iter->max = iter->dma + rem;
1276
1277				if (maybe_64K != -1 && index < I915_PDES &&
1278				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1279				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1280				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
1281					maybe_64K = -1;
1282
1283				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
1284					break;
1285			}
1286		} while (rem >= page_size && index < I915_PDES);
1287
1288		kunmap_atomic(vaddr);
 
 
 
 
1289
1290		/*
1291		 * Is it safe to mark the 2M block as 64K? -- Either we have
1292		 * filled the whole page-table with 64K entries, or we have filled
1293		 * part of it, reached the end of the sg table and have
1294		 * enough padding.
1295		 */
1296		if (maybe_64K != -1 &&
1297		    (index == I915_PDES ||
1298		     (i915_vm_has_scratch_64K(vma->vm) &&
1299		      !iter->sg && IS_ALIGNED(vma->node.start +
1300					      vma->node.size,
1301					      I915_GTT_PAGE_SIZE_2M)))) {
1302			vaddr = kmap_atomic_px(pd);
1303			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
1304			kunmap_atomic(vaddr);
1305			page_size = I915_GTT_PAGE_SIZE_64K;
1306
1307			/*
1308			 * We write all 4K page entries, even when using 64K
1309			 * pages. In order to verify that the HW isn't cheating
1310			 * by using the 4K PTE instead of the 64K PTE, we want
1311			 * to remove all the surplus entries. If the HW skipped
1312			 * the 64K PTE, it will read/write into the scratch page
1313			 * instead - which we detect as missing results during
1314			 * selftests.
1315			 */
1316			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
1317				u16 i;
1318
1319				encode = vma->vm->scratch[0].encode;
1320				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
1321
1322				for (i = 1; i < index; i += 16)
1323					memset64(vaddr + i, encode, 15);
 
 
 
 
 
1324
1325				kunmap_atomic(vaddr);
1326			}
1327		}
1328
1329		vma->page_sizes.gtt |= page_size;
1330	} while (iter->sg);
1331}
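/*
 * Summary of the page-size selection above: a run is written with a 2M PDE
 * only when the dma address and the GTT offset are both 2M aligned and at
 * least 2M remains in the current sg entry; otherwise 4K PTEs are used, and a
 * page table filled from 64K-aligned dma that ends up fully populated (or
 * padded out to the 2M boundary by scratch) may afterwards be promoted by
 * setting GEN8_PDE_IPS_64K on its PDE.
 */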
 
1332
1333static void gen8_ppgtt_insert(struct i915_address_space *vm,
1334			      struct i915_vma *vma,
1335			      enum i915_cache_level cache_level,
1336			      u32 flags)
1337{
1338	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
1339	struct sgt_dma iter = sgt_dma(vma);
1340
1341	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
1342		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
1343	} else  {
1344		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
1345
1346		do {
1347			struct i915_page_directory * const pdp =
1348				gen8_pdp_for_page_index(vm, idx);
 
1349
1350			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
1351						    cache_level, flags);
1352		} while (idx);
1353
1354		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1355	}
 
1356}
1357
1358static int gen8_init_scratch(struct i915_address_space *vm)
1359{
1360	int ret;
1361	int i;
1362
1363	/*
1364	 * If everybody agrees not to write into the scratch page,
1365	 * we can reuse it for all VMs, keeping contexts and processes separate.
1366	 */
1367	if (vm->has_read_only &&
1368	    vm->i915->kernel_context &&
1369	    vm->i915->kernel_context->vm) {
1370		struct i915_address_space *clone = vm->i915->kernel_context->vm;
1371
1372		GEM_BUG_ON(!clone->has_read_only);
1373
1374		vm->scratch_order = clone->scratch_order;
1375		memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
1376		px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
1377		return 0;
1378	}
1379
1380	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1381	if (ret)
1382		return ret;
1383
1384	vm->scratch[0].encode =
1385		gen8_pte_encode(px_dma(&vm->scratch[0]),
1386				I915_CACHE_LLC, vm->has_read_only);
1387
1388	for (i = 1; i <= vm->top; i++) {
1389		if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
1390			goto free_scratch;
 
 
 
1391
1392		fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
1393		vm->scratch[i].encode =
1394			gen8_pde_encode(px_dma(&vm->scratch[i]),
1395					I915_CACHE_LLC);
1396	}
1397
 
 
 
1398	return 0;
1399
1400free_scratch:
1401	free_scratch(vm);
1402	return -ENOMEM;
1403}
1404
1405static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
 
 
 
1406{
1407	struct i915_address_space *vm = &ppgtt->vm;
1408	struct i915_page_directory *pd = ppgtt->pd;
1409	unsigned int idx;
 
 
 
 
 
1410
1411	GEM_BUG_ON(vm->top != 2);
1412	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
1413
1414	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
1415		struct i915_page_directory *pde;
 
 
1416
1417		pde = alloc_pd(vm);
1418		if (IS_ERR(pde))
1419			return PTR_ERR(pde);
1420
1421		fill_px(pde, vm->scratch[1].encode);
1422		set_pd_entry(pd, idx, pde);
1423		atomic_inc(px_used(pde)); /* keep pinned */
1424	}
1425
1426	return 0;
1427}
1428
1429static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
1430{
1431	struct drm_i915_private *i915 = gt->i915;
 
 
 
 
1432
1433	ppgtt->vm.gt = gt;
1434	ppgtt->vm.i915 = i915;
1435	ppgtt->vm.dma = &i915->drm.pdev->dev;
1436	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
 
 
1437
1438	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
 
 
1439
1440	ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
1441	ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
1442	ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
1443	ppgtt->vm.vma_ops.clear_pages = clear_pages;
1444}
1445
1446static struct i915_page_directory *
1447gen8_alloc_top_pd(struct i915_address_space *vm)
1448{
1449	const unsigned int count = gen8_pd_top_count(vm);
1450	struct i915_page_directory *pd;
 
1451
1452	GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
 
 
 
 
 
1453
1454	pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
1455	if (unlikely(!pd))
1456		return ERR_PTR(-ENOMEM);
 
 
 
 
 
1457
1458	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
1459		kfree(pd);
1460		return ERR_PTR(-ENOMEM);
1461	}
1462
1463	fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
1464	atomic_inc(px_used(pd)); /* mark as pinned */
1465	return pd;
1466}
1467
1468/*
1469 * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
1470 * registers, with a net effect resembling a 2-level page table in normal x86
1471 * terms. Each PDP represents 1GB of memory: 4 PDPs * 512 PDEs * 512 PTEs *
1472 * 4096 bytes = 4GB, the legacy 32b address space.
1473 *
1474 */
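/*
 * For comparison, the full 4-level (48b) PPGTT created below (vm->top == 3)
 * places a 512-entry PML4 above the PDPs: 512 * 512 * 512 * 512 * 4KiB =
 * 256TiB of address space.
 */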
1475static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
1476{
1477	struct i915_ppgtt *ppgtt;
1478	int err;
1479
1480	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1481	if (!ppgtt)
1482		return ERR_PTR(-ENOMEM);
1483
1484	ppgtt_init(ppgtt, &i915->gt);
1485	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
 
 
 
 
 
 
1486
1487	/*
1488	 * From bdw, there is hw support for read-only pages in the PPGTT.
1489	 *
1490	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
1491	 * for now.
1492	 */
1493	ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11;
1494
1495	/* There are only a few exceptions for gen >= 6: chv and bxt.
1496	 * And we are not sure about the latter, so play safe for now.
1497	 */
1498	if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
1499		ppgtt->vm.pt_kmap_wc = true;
1500
1501	err = gen8_init_scratch(&ppgtt->vm);
1502	if (err)
1503		goto err_free;
 
 
 
1504
1505	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
1506	if (IS_ERR(ppgtt->pd)) {
1507		err = PTR_ERR(ppgtt->pd);
1508		goto err_free_scratch;
1509	}
1510
1511	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
1512		if (intel_vgpu_active(i915)) {
1513			err = gen8_preallocate_top_level_pdp(ppgtt);
1514			if (err)
1515				goto err_free_pd;
1516		}
1517	}
1518
1519	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
1520	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
1521	ppgtt->vm.clear_range = gen8_ppgtt_clear;
1522
1523	if (intel_vgpu_active(i915))
1524		gen8_ppgtt_notify_vgt(ppgtt, true);
1525
1526	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
 
 
 
1527
1528	return ppgtt;
1529
1530err_free_pd:
1531	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
1532			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
1533err_free_scratch:
1534	free_scratch(&ppgtt->vm);
1535err_free:
1536	kfree(ppgtt);
1537	return ERR_PTR(err);
 
 
 
1538}
1539
1540/* Write pde (index) from the page directory @pd to the page table @pt */
1541static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
1542				  const unsigned int pde,
1543				  const struct i915_page_table *pt)
1544{
1545	/* Caller needs to make sure the write completes if necessary */
1546	iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1547		  ppgtt->pd_addr + pde);
 
 
 
 
 
 
1548}
1549
1550static void gen7_ppgtt_enable(struct intel_gt *gt)
 
 
 
 
1551{
1552	struct drm_i915_private *i915 = gt->i915;
1553	struct intel_uncore *uncore = gt->uncore;
1554	struct intel_engine_cs *engine;
1555	enum intel_engine_id id;
1556	u32 ecochk;
1557
1558	intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
1559
1560	ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
1561	if (IS_HASWELL(i915)) {
1562		ecochk |= ECOCHK_PPGTT_WB_HSW;
1563	} else {
1564		ecochk |= ECOCHK_PPGTT_LLC_IVB;
1565		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1566	}
1567	intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
1568
1569	for_each_engine(engine, i915, id) {
1570		/* GFX_MODE is per-ring on gen7+ */
1571		ENGINE_WRITE(engine,
1572			     RING_MODE_GEN7,
1573			     _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1574	}
1575}
1576
1577static void gen6_ppgtt_enable(struct intel_gt *gt)
1578{
1579	struct intel_uncore *uncore = gt->uncore;
 
1580
1581	intel_uncore_rmw(uncore,
1582			 GAC_ECO_BITS,
1583			 0,
1584			 ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
1585
1586	intel_uncore_rmw(uncore,
1587			 GAB_CTL,
1588			 0,
1589			 GAB_CTL_CONT_AFTER_PAGEFAULT);
1590
1591	intel_uncore_rmw(uncore,
1592			 GAM_ECOCHK,
1593			 0,
1594			 ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1595
1596	if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
1597		intel_uncore_write(uncore,
1598				   GFX_MODE,
1599				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1600}
1601
1602/* PPGTT support for Sandybridge/Gen6 and later */
1603static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1604				   u64 start, u64 length)
1605{
1606	struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1607	const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
1608	const gen6_pte_t scratch_pte = vm->scratch[0].encode;
1609	unsigned int pde = first_entry / GEN6_PTES;
1610	unsigned int pte = first_entry % GEN6_PTES;
1611	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
1612
1613	while (num_entries) {
1614		struct i915_page_table * const pt =
1615			i915_pt_entry(ppgtt->base.pd, pde++);
1616		const unsigned int count = min(num_entries, GEN6_PTES - pte);
1617		gen6_pte_t *vaddr;
1618
1619		GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));
1620
1621		num_entries -= count;
1622
1623		GEM_BUG_ON(count > atomic_read(&pt->used));
1624		if (!atomic_sub_return(count, &pt->used))
1625			ppgtt->scan_for_unused_pt = true;
1626
1627		/*
1628		 * Note that the hw doesn't support removing PDE on the fly
1629		 * (they are cached inside the context with no means to
1630		 * invalidate the cache), so we can only reset the PTE
1631		 * entries back to scratch.
1632		 */
1633
1634		vaddr = kmap_atomic_px(pt);
1635		memset32(vaddr + pte, scratch_pte, count);
1636		kunmap_atomic(vaddr);
1637
1638		pte = 0;
 
 
1639	}
1640}
1641
1642static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1643				      struct i915_vma *vma,
1644				      enum i915_cache_level cache_level,
1645				      u32 flags)
1646{
1647	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1648	struct i915_page_directory * const pd = ppgtt->pd;
1649	unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
 
1650	unsigned act_pt = first_entry / GEN6_PTES;
1651	unsigned act_pte = first_entry % GEN6_PTES;
1652	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1653	struct sgt_dma iter = sgt_dma(vma);
1654	gen6_pte_t *vaddr;
1655
1656	GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);
1657
1658	vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
1659	do {
1660		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
1661
1662		iter.dma += I915_GTT_PAGE_SIZE;
1663		if (iter.dma == iter.max) {
1664			iter.sg = __sg_next(iter.sg);
1665			if (!iter.sg)
1666				break;
1667
1668			iter.dma = sg_dma_address(iter.sg);
1669			iter.max = iter.dma + iter.sg->length;
1670		}
 
 
 
 
 
1671
1672		if (++act_pte == GEN6_PTES) {
1673			kunmap_atomic(vaddr);
1674			vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
 
1675			act_pte = 0;
1676		}
1677	} while (1);
1678	kunmap_atomic(vaddr);
1679
1680	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1681}
1682
1683static int gen6_alloc_va_range(struct i915_address_space *vm,
1684			       u64 start, u64 length)
1685{
1686	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1687	struct i915_page_directory * const pd = ppgtt->base.pd;
1688	struct i915_page_table *pt, *alloc = NULL;
1689	intel_wakeref_t wakeref;
1690	u64 from = start;
1691	unsigned int pde;
1692	bool flush = false;
1693	int ret = 0;
1694
1695	wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
1696
1697	spin_lock(&pd->lock);
1698	gen6_for_each_pde(pt, pd, start, length, pde) {
1699		const unsigned int count = gen6_pte_count(start, length);
1700
1701		if (px_base(pt) == px_base(&vm->scratch[1])) {
1702			spin_unlock(&pd->lock);
1703
1704			pt = fetch_and_zero(&alloc);
1705			if (!pt)
1706				pt = alloc_pt(vm);
1707			if (IS_ERR(pt)) {
1708				ret = PTR_ERR(pt);
1709				goto unwind_out;
1710			}
1711
1712			fill32_px(pt, vm->scratch[0].encode);
 
1713
1714			spin_lock(&pd->lock);
1715			if (pd->entry[pde] == &vm->scratch[1]) {
1716				pd->entry[pde] = pt;
1717				if (i915_vma_is_bound(ppgtt->vma,
1718						      I915_VMA_GLOBAL_BIND)) {
1719					gen6_write_pde(ppgtt, pde, pt);
1720					flush = true;
1721				}
1722			} else {
1723				alloc = pt;
1724				pt = pd->entry[pde];
1725			}
1726		}
1727
1728		atomic_add(count, &pt->used);
 
 
 
 
1729	}
1730	spin_unlock(&pd->lock);
1731
1732	if (flush) {
1733		mark_tlbs_dirty(&ppgtt->base);
1734		gen6_ggtt_invalidate(vm->gt->ggtt);
1735	}
1736
1737	goto out;
1738
1739unwind_out:
1740	gen6_ppgtt_clear_range(vm, from, start - from);
1741out:
1742	if (alloc)
1743		free_px(vm, alloc);
1744	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
 
 
 
1745	return ret;
1746}
1747
1748static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
1749{
1750	struct i915_address_space * const vm = &ppgtt->base.vm;
1751	struct i915_page_directory * const pd = ppgtt->base.pd;
1752	int ret;
1753
1754	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1755	if (ret)
1756		return ret;
1757
1758	vm->scratch[0].encode =
1759		vm->pte_encode(px_dma(&vm->scratch[0]),
1760			       I915_CACHE_NONE, PTE_READ_ONLY);
1761
1762	if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) {
1763		cleanup_scratch_page(vm);
1764		return -ENOMEM;
 
1765	}
1766
1767	fill32_px(&vm->scratch[1], vm->scratch[0].encode);
1768	memset_p(pd->entry, &vm->scratch[1], I915_PDES);
1769
1770	return 0;
1771}
1772
1773static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
1774{
1775	struct i915_page_directory * const pd = ppgtt->base.pd;
1776	struct i915_page_dma * const scratch =
1777		px_base(&ppgtt->base.vm.scratch[1]);
1778	struct i915_page_table *pt;
1779	u32 pde;
1780
1781	gen6_for_all_pdes(pt, pd, pde)
1782		if (px_base(pt) != scratch)
1783			free_px(&ppgtt->base.vm, pt);
1784}
1785
1786static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1787{
1788	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1789	struct drm_i915_private *i915 = vm->i915;
 
 
1790
1791	/* FIXME remove the struct_mutex to bring the locking under control */
1792	mutex_lock(&i915->drm.struct_mutex);
1793	i915_vma_destroy(ppgtt->vma);
1794	mutex_unlock(&i915->drm.struct_mutex);
1795
1796	gen6_ppgtt_free_pd(ppgtt);
1797	free_scratch(vm);
1798	kfree(ppgtt->base.pd);
 
 
 
1799}
1800
1801static int pd_vma_set_pages(struct i915_vma *vma)
1802{
1803	vma->pages = ERR_PTR(-ENODEV);
1804	return 0;
 
 
 
 
1805}
1806
1807static void pd_vma_clear_pages(struct i915_vma *vma)
1808{
1809	GEM_BUG_ON(!vma->pages);
1810
1811	vma->pages = NULL;
1812}
1813
1814static int pd_vma_bind(struct i915_vma *vma,
1815		       enum i915_cache_level cache_level,
1816		       u32 unused)
1817{
1818	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
1819	struct gen6_ppgtt *ppgtt = vma->private;
1820	u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
1821	struct i915_page_table *pt;
1822	unsigned int pde;
1823
1824	px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
1825	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
1826
1827	gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
1828		gen6_write_pde(ppgtt, pde, pt);
1829
1830	mark_tlbs_dirty(&ppgtt->base);
1831	gen6_ggtt_invalidate(ggtt);
1832
1833	return 0;
1834}
1835
1836static void pd_vma_unbind(struct i915_vma *vma)
1837{
1838	struct gen6_ppgtt *ppgtt = vma->private;
1839	struct i915_page_directory * const pd = ppgtt->base.pd;
1840	struct i915_page_dma * const scratch =
1841		px_base(&ppgtt->base.vm.scratch[1]);
1842	struct i915_page_table *pt;
1843	unsigned int pde;
1844
1845	if (!ppgtt->scan_for_unused_pt)
1846		return;
1847
1848	/* Free all no longer used page tables */
1849	gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
1850		if (px_base(pt) == scratch || atomic_read(&pt->used))
1851			continue;
1852
1853		free_px(&ppgtt->base.vm, pt);
1854		pd->entry[pde] = scratch;
1855	}
1856
1857	ppgtt->scan_for_unused_pt = false;
1858}
1859
1860static const struct i915_vma_ops pd_vma_ops = {
1861	.set_pages = pd_vma_set_pages,
1862	.clear_pages = pd_vma_clear_pages,
1863	.bind_vma = pd_vma_bind,
1864	.unbind_vma = pd_vma_unbind,
1865};
1866
1867static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
1868{
1869	struct drm_i915_private *i915 = ppgtt->base.vm.i915;
1870	struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
1871	struct i915_vma *vma;
1872
1873	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
1874	GEM_BUG_ON(size > ggtt->vm.total);
1875
1876	vma = i915_vma_alloc();
1877	if (!vma)
1878		return ERR_PTR(-ENOMEM);
1879
1880	i915_active_init(i915, &vma->active, NULL, NULL);
1881
1882	vma->vm = &ggtt->vm;
1883	vma->ops = &pd_vma_ops;
1884	vma->private = ppgtt;
1885
1886	vma->size = size;
1887	vma->fence_size = size;
1888	vma->flags = I915_VMA_GGTT;
1889	vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
1890
1891	INIT_LIST_HEAD(&vma->obj_link);
1892	INIT_LIST_HEAD(&vma->closed_link);
1893
1894	mutex_lock(&vma->vm->mutex);
1895	list_add(&vma->vm_link, &vma->vm->unbound_list);
1896	mutex_unlock(&vma->vm->mutex);
1897
1898	return vma;
1899}
1900
1901int gen6_ppgtt_pin(struct i915_ppgtt *base)
1902{
1903	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
1904	int err;
1905
1906	GEM_BUG_ON(ppgtt->base.vm.closed);
1907
1908	/*
1909	 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
1910	 * which will be pinned into every active context.
1911	 * (When vma->pin_count becomes atomic, I expect we will naturally
1912	 * need a larger, unpacked, type and kill this redundancy.)
1913	 */
1914	if (ppgtt->pin_count++)
1915		return 0;
1916
1917	/*
1918	 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
1919	 * allocator works in address space sizes, so it's multiplied by page
1920	 * size. We allocate at the top of the GTT to avoid fragmentation.
1921	 */
1922	err = i915_vma_pin(ppgtt->vma,
1923			   0, GEN6_PD_ALIGN,
1924			   PIN_GLOBAL | PIN_HIGH);
1925	if (err)
1926		goto unpin;
1927
1928	return 0;
1929
1930unpin:
1931	ppgtt->pin_count = 0;
1932	return err;
1933}
1934
1935void gen6_ppgtt_unpin(struct i915_ppgtt *base)
1936{
1937	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
1938
1939	GEM_BUG_ON(!ppgtt->pin_count);
1940	if (--ppgtt->pin_count)
1941		return;
1942
1943	i915_vma_unpin(ppgtt->vma);
1944}
1945
1946void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
1947{
1948	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
1949
1950	if (!ppgtt->pin_count)
1951		return;
1952
1953	ppgtt->pin_count = 0;
1954	i915_vma_unpin(ppgtt->vma);
1955}
1956
1957static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
1958{
1959	struct i915_ggtt * const ggtt = &i915->ggtt;
1960	struct gen6_ppgtt *ppgtt;
1961	int err;
1962
1963	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1964	if (!ppgtt)
1965		return ERR_PTR(-ENOMEM);
1966
1967	ppgtt_init(&ppgtt->base, &i915->gt);
1968	ppgtt->base.vm.top = 1;
1969
1970	ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
1971	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
1972	ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
1973	ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
1974
1975	ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
1976
1977	ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
1978	if (!ppgtt->base.pd) {
1979		err = -ENOMEM;
1980		goto err_free;
1981	}
1982
1983	err = gen6_ppgtt_init_scratch(ppgtt);
1984	if (err)
1985		goto err_pd;
1986
1987	ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
1988	if (IS_ERR(ppgtt->vma)) {
1989		err = PTR_ERR(ppgtt->vma);
1990		goto err_scratch;
1991	}
1992
1993	return &ppgtt->base;
1994
1995err_scratch:
1996	free_scratch(&ppgtt->base.vm);
1997err_pd:
1998	kfree(ppgtt->base.pd);
1999err_free:
2000	kfree(ppgtt);
2001	return ERR_PTR(err);
2002}
2003
2004static void gtt_write_workarounds(struct intel_gt *gt)
2005{
2006	struct drm_i915_private *i915 = gt->i915;
2007	struct intel_uncore *uncore = gt->uncore;
2008
2009	/* This function is for gtt related workarounds. It is called on
2010	 * driver load and after a GPU reset, so you can place workarounds
2011	 * here even if they get overwritten by a GPU reset.
2012	 */
2013	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
2014	if (IS_BROADWELL(i915))
2015		intel_uncore_write(uncore,
2016				   GEN8_L3_LRA_1_GPGPU,
2017				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
2018	else if (IS_CHERRYVIEW(i915))
2019		intel_uncore_write(uncore,
2020				   GEN8_L3_LRA_1_GPGPU,
2021				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
2022	else if (IS_GEN9_LP(i915))
2023		intel_uncore_write(uncore,
2024				   GEN8_L3_LRA_1_GPGPU,
2025				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2026	else if (INTEL_GEN(i915) >= 9)
2027		intel_uncore_write(uncore,
2028				   GEN8_L3_LRA_1_GPGPU,
2029				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
2030
2031	/*
2032	 * To support 64K PTEs we need to first enable the use of the
2033	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
2034	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
2035	 * shouldn't be needed after GEN10.
2036	 *
2037	 * 64K pages were first introduced from BDW+, although technically they
2038	 * only *work* from gen9+. For pre-BDW we instead have the option for
2039	 * 32K pages, but we don't currently have any support for it in our
2040	 * driver.
2041	 */
2042	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
2043	    INTEL_GEN(i915) <= 10)
2044		intel_uncore_rmw(uncore,
2045				 GEN8_GAMW_ECO_DEV_RW_IA,
2046				 0,
2047				 GAMW_ECO_ENABLE_64K_IPS_FIELD);
2048
2049	if (IS_GEN_RANGE(i915, 8, 11)) {
2050		bool can_use_gtt_cache = true;
2051
2052		/*
2053		 * According to the BSpec if we use 2M/1G pages then we also
2054		 * need to disable the GTT cache. At least on BDW we can see
2055		 * visual corruption when using 2M pages, and not disabling the
2056		 * GTT cache.
2057		 */
2058		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
2059			can_use_gtt_cache = false;
2060
2061		/* WaGttCachingOffByDefault */
2062		intel_uncore_write(uncore,
2063				   HSW_GTT_CACHE_EN,
2064				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
2065		WARN_ON_ONCE(can_use_gtt_cache &&
2066			     intel_uncore_read(uncore,
2067					       HSW_GTT_CACHE_EN) == 0);
2068	}
2069}
2070
2071int i915_ppgtt_init_hw(struct intel_gt *gt)
2072{
2073	struct drm_i915_private *i915 = gt->i915;
2074
2075	gtt_write_workarounds(gt);
2076
2077	if (IS_GEN(i915, 6))
2078		gen6_ppgtt_enable(gt);
2079	else if (IS_GEN(i915, 7))
2080		gen7_ppgtt_enable(gt);
2081
2082	return 0;
2083}
2084
2085static struct i915_ppgtt *
2086__ppgtt_create(struct drm_i915_private *i915)
2087{
2088	if (INTEL_GEN(i915) < 8)
2089		return gen6_ppgtt_create(i915);
2090	else
2091		return gen8_ppgtt_create(i915);
2092}
2093
2094struct i915_ppgtt *
2095i915_ppgtt_create(struct drm_i915_private *i915)
2096{
2097	struct i915_ppgtt *ppgtt;
2098
2099	ppgtt = __ppgtt_create(i915);
2100	if (IS_ERR(ppgtt))
2101		return ppgtt;
2102
2103	trace_i915_ppgtt_create(&ppgtt->vm);
2104
2105	return ppgtt;
2106}
2107
2108/* Certain Gen5 chipsets require idling the GPU before
2109 * unmapping anything from the GTT when VT-d is enabled.
2110 */
2111static bool needs_idle_maps(struct drm_i915_private *dev_priv)
2112{
2113	/* Query intel_iommu to see if we need the workaround. Presumably that
2114	 * was loaded first.
2115	 */
2116	return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
2117}
2118
2119static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
2120{
2121	struct drm_i915_private *i915 = ggtt->vm.i915;
2122
2123	/* Don't bother messing with faults pre GEN6 as we have little
2124	 * documentation supporting that it's a good idea.
2125	 */
2126	if (INTEL_GEN(i915) < 6)
2127		return;
2128
2129	intel_gt_check_and_clear_faults(ggtt->vm.gt);
2130
2131	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
2132
2133	ggtt->invalidate(ggtt);
2134}
2135
2136void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
2137{
2138	ggtt_suspend_mappings(&i915->ggtt);
2139}
2140
2141int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2142			       struct sg_table *pages)
2143{
2144	do {
2145		if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
2146				     pages->sgl, pages->nents,
2147				     PCI_DMA_BIDIRECTIONAL,
2148				     DMA_ATTR_NO_WARN))
2149			return 0;
2150
2151		/*
2152		 * If the DMA remap fails, one cause can be that we have
2153		 * too many objects pinned in a small remapping table,
2154		 * such as swiotlb. Incrementally purge all other objects and
2155		 * try again - if there are no more pages to remove from
2156		 * the DMA remapper, i915_gem_shrink will return 0.
2157		 */
2158		GEM_BUG_ON(obj->mm.pages == pages);
2159	} while (i915_gem_shrink(to_i915(obj->base.dev),
2160				 obj->base.size >> PAGE_SHIFT, NULL,
2161				 I915_SHRINK_BOUND |
2162				 I915_SHRINK_UNBOUND));
2163
2164	return -ENOSPC;
2165}
2166
2167static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2168{
2169	writeq(pte, addr);
2170}
2171
2172static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2173				  dma_addr_t addr,
2174				  u64 offset,
2175				  enum i915_cache_level level,
2176				  u32 unused)
2177{
2178	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2179	gen8_pte_t __iomem *pte =
2180		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
2181
2182	gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
2183
2184	ggtt->invalidate(ggtt);
2185}
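
/*
 * Worked example with a hypothetical offset: for offset == 0x10000 the
 * PTE index computed above is 0x10000 / I915_GTT_PAGE_SIZE == 16, so the
 * 64-bit entry is written 16 * sizeof(gen8_pte_t) == 128 bytes into the
 * gsm mapping of the GTT page table, and the GGTT is then invalidated.
 */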
2186
2187static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2188				     struct i915_vma *vma,
2189				     enum i915_cache_level level,
2190				     u32 flags)
2191{
2192	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2193	struct sgt_iter sgt_iter;
2194	gen8_pte_t __iomem *gtt_entries;
2195	const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
2196	dma_addr_t addr;
2197
2198	/*
2199	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
2200	 * not to allow the user to override access to a read only page.
2201	 */
2202
2203	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
2204	gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
2205	for_each_sgt_dma(addr, sgt_iter, vma->pages)
2206		gen8_set_pte(gtt_entries++, pte_encode | addr);
2207
2208	/*
2209	 * We want to flush the TLBs only after we're certain all the PTE
2210	 * updates have finished.
2211	 */
2212	ggtt->invalidate(ggtt);
2213}
2214
2215static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2216				  dma_addr_t addr,
2217				  u64 offset,
2218				  enum i915_cache_level level,
2219				  u32 flags)
2220{
2221	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2222	gen6_pte_t __iomem *pte =
2223		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
2224
2225	iowrite32(vm->pte_encode(addr, level, flags), pte);
2226
2227	ggtt->invalidate(ggtt);
2228}
2229
2230/*
2231 * Binds an object into the global gtt with the specified cache level. The object
2232 * will be accessible to the GPU via commands whose operands reference offsets
2233 * within the global GTT as well as accessible by the GPU through the GMADR
2234 * mapped BAR (dev_priv->mm.gtt->gtt).
2235 */
2236static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2237				     struct i915_vma *vma,
2238				     enum i915_cache_level level,
2239				     u32 flags)
2240{
2241	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2242	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
2243	unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
2244	struct sgt_iter iter;
2245	dma_addr_t addr;
2246	for_each_sgt_dma(addr, iter, vma->pages)
2247		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2248
2249	/*
2250	 * We want to flush the TLBs only after we're certain all the PTE
2251	 * updates have finished.
2252	 */
2253	ggtt->invalidate(ggtt);
2254}
2255
2256static void nop_clear_range(struct i915_address_space *vm,
2257			    u64 start, u64 length)
2258{
2259}
2260
2261static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2262				  u64 start, u64 length)
2263{
2264	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2265	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2266	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2267	const gen8_pte_t scratch_pte = vm->scratch[0].encode;
2268	gen8_pte_t __iomem *gtt_base =
2269		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2270	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2271	int i;
2272
2273	if (WARN(num_entries > max_entries,
2274		 "First entry = %d; Num entries = %d (max=%d)\n",
2275		 first_entry, num_entries, max_entries))
2276		num_entries = max_entries;
2277
2278	for (i = 0; i < num_entries; i++)
2279		gen8_set_pte(&gtt_base[i], scratch_pte);
2280}
2281
2282static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2283{
2284	struct drm_i915_private *dev_priv = vm->i915;
2285
2286	/*
2287	 * Make sure the internal GAM fifo has been cleared of all GTT
2288	 * writes before exiting stop_machine(). This guarantees that
2289	 * any aperture accesses waiting to start in another process
2290	 * cannot back up behind the GTT writes causing a hang.
2291	 * The register can be any arbitrary GAM register.
2292	 */
2293	POSTING_READ(GFX_FLSH_CNTL_GEN6);
2294}
2295
2296struct insert_page {
2297	struct i915_address_space *vm;
2298	dma_addr_t addr;
2299	u64 offset;
2300	enum i915_cache_level level;
2301};
2302
2303static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2304{
2305	struct insert_page *arg = _arg;
2306
2307	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2308	bxt_vtd_ggtt_wa(arg->vm);
2309
2310	return 0;
2311}
2312
2313static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2314					  dma_addr_t addr,
2315					  u64 offset,
2316					  enum i915_cache_level level,
2317					  u32 unused)
2318{
2319	struct insert_page arg = { vm, addr, offset, level };
2320
2321	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2322}
2323
2324struct insert_entries {
2325	struct i915_address_space *vm;
2326	struct i915_vma *vma;
2327	enum i915_cache_level level;
2328	u32 flags;
2329};
2330
2331static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2332{
2333	struct insert_entries *arg = _arg;
2334
2335	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
2336	bxt_vtd_ggtt_wa(arg->vm);
2337
2338	return 0;
2339}
2340
2341static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2342					     struct i915_vma *vma,
2343					     enum i915_cache_level level,
2344					     u32 flags)
2345{
2346	struct insert_entries arg = { vm, vma, level, flags };
2347
2348	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2349}
2350
2351struct clear_range {
2352	struct i915_address_space *vm;
2353	u64 start;
2354	u64 length;
2355};
2356
2357static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2358{
2359	struct clear_range *arg = _arg;
2360
2361	gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2362	bxt_vtd_ggtt_wa(arg->vm);
2363
2364	return 0;
2365}
2366
2367static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2368					  u64 start,
2369					  u64 length)
2370{
2371	struct clear_range arg = { vm, start, length };
2372
2373	stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2374}
2375
2376static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2377				  u64 start, u64 length)
2378{
2379	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2380	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2381	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2382	gen6_pte_t scratch_pte, __iomem *gtt_base =
2383		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2384	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2385	int i;
2386
2387	if (WARN(num_entries > max_entries,
2388		 "First entry = %d; Num entries = %d (max=%d)\n",
2389		 first_entry, num_entries, max_entries))
2390		num_entries = max_entries;
2391
2392	scratch_pte = vm->scratch[0].encode;
2393	for (i = 0; i < num_entries; i++)
2394		iowrite32(scratch_pte, &gtt_base[i]);
2395}
2396
2397static void i915_ggtt_insert_page(struct i915_address_space *vm,
2398				  dma_addr_t addr,
2399				  u64 offset,
2400				  enum i915_cache_level cache_level,
2401				  u32 unused)
2402{
2403	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2404		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2405
2406	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
2407}
2408
2409static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2410				     struct i915_vma *vma,
2411				     enum i915_cache_level cache_level,
2412				     u32 unused)
2413{
2414	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2415		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2416
2417	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2418				    flags);
2419}
2420
2421static void i915_ggtt_clear_range(struct i915_address_space *vm,
2422				  u64 start, u64 length)
2423{
2424	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
2425}
2426
2427static int ggtt_bind_vma(struct i915_vma *vma,
2428			 enum i915_cache_level cache_level,
2429			 u32 flags)
2430{
2431	struct drm_i915_private *i915 = vma->vm->i915;
2432	struct drm_i915_gem_object *obj = vma->obj;
2433	intel_wakeref_t wakeref;
2434	u32 pte_flags;
2435
2436	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
2437	pte_flags = 0;
2438	if (i915_gem_object_is_readonly(obj))
2439		pte_flags |= PTE_READ_ONLY;
2440
2441	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2442		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
2443
2444	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
2445
2446	/*
2447	 * Without aliasing PPGTT there's no difference between
2448	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2449	 * upgrade to both bound if we bind either to avoid double-binding.
2450	 */
2451	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
2452
2453	return 0;
2454}
2455
2456static void ggtt_unbind_vma(struct i915_vma *vma)
2457{
2458	struct drm_i915_private *i915 = vma->vm->i915;
2459	intel_wakeref_t wakeref;
2460
2461	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2462		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2463}
2464
2465static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2466				 enum i915_cache_level cache_level,
2467				 u32 flags)
2468{
2469	struct drm_i915_private *i915 = vma->vm->i915;
2470	u32 pte_flags;
2471	int ret;
2472
2473	/* Currently applicable only to VLV */
2474	pte_flags = 0;
2475	if (i915_gem_object_is_readonly(vma->obj))
2476		pte_flags |= PTE_READ_ONLY;
2477
2478	if (flags & I915_VMA_LOCAL_BIND) {
2479		struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
2480
2481		if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
2482			ret = alias->vm.allocate_va_range(&alias->vm,
2483							  vma->node.start,
2484							  vma->size);
2485			if (ret)
2486				return ret;
2487		}
2488
2489		alias->vm.insert_entries(&alias->vm, vma,
2490					 cache_level, pte_flags);
2491	}
2492
2493	if (flags & I915_VMA_GLOBAL_BIND) {
2494		intel_wakeref_t wakeref;
2495
2496		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
2497			vma->vm->insert_entries(vma->vm, vma,
2498						cache_level, pte_flags);
2499		}
2500	}
2501
2502	return 0;
2503}
2504
2505static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
2506{
2507	struct drm_i915_private *i915 = vma->vm->i915;
2508
2509	if (vma->flags & I915_VMA_GLOBAL_BIND) {
2510		struct i915_address_space *vm = vma->vm;
2511		intel_wakeref_t wakeref;
2512
2513		with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2514			vm->clear_range(vm, vma->node.start, vma->size);
2515	}
2516
2517	if (vma->flags & I915_VMA_LOCAL_BIND) {
2518		struct i915_address_space *vm =
2519			&i915_vm_to_ggtt(vma->vm)->alias->vm;
2520
2521		vm->clear_range(vm, vma->node.start, vma->size);
2522	}
2523}
2524
2525void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2526			       struct sg_table *pages)
2527{
2528	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2529	struct device *kdev = &dev_priv->drm.pdev->dev;
2530	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2531
2532	if (unlikely(ggtt->do_idle_maps)) {
2533		if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
2534			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2535			/* Wait a bit, in hopes it avoids the hang */
2536			udelay(10);
2537		}
2538	}
2539
2540	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
2541}
2542
2543static int ggtt_set_pages(struct i915_vma *vma)
2544{
2545	int ret;
2546
2547	GEM_BUG_ON(vma->pages);
2548
2549	ret = i915_get_ggtt_vma_pages(vma);
2550	if (ret)
2551		return ret;
2552
2553	vma->page_sizes = vma->obj->mm.page_sizes;
2554
2555	return 0;
2556}
2557
2558static void i915_gtt_color_adjust(const struct drm_mm_node *node,
2559				  unsigned long color,
2560				  u64 *start,
2561				  u64 *end)
2562{
2563	if (node->allocated && node->color != color)
2564		*start += I915_GTT_PAGE_SIZE;
2565
2566	/* Also leave a space between the unallocated reserved node after the
2567	 * GTT and any objects within the GTT, i.e. we use the color adjustment
2568	 * to insert a guard page to prevent prefetches crossing over the
2569	 * GTT boundary.
2570	 */
2571	node = list_next_entry(node, node_list);
2572	if (node->color != color)
2573		*end -= I915_GTT_PAGE_SIZE;
2574}
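
/*
 * A sketch of the adjustment above using hypothetical colours: if the node
 * preceding the candidate hole has a different cache colour, the hole
 * start is bumped up by one GTT page (e.g. 0x20000 -> 0x21000); if the
 * node following it differs, the hole end is pulled back by one page, so a
 * scratch-backed guard page always separates differently coloured
 * neighbours.
 */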
2575
2576static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
2577{
2578	struct i915_ppgtt *ppgtt;
2579	int err;
2580
2581	ppgtt = i915_ppgtt_create(ggtt->vm.i915);
2582	if (IS_ERR(ppgtt))
2583		return PTR_ERR(ppgtt);
2584
2585	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
2586		err = -ENODEV;
2587		goto err_ppgtt;
2588	}
2589
2590	/*
2591	 * Note we only pre-allocate as far as the end of the global
2592	 * GTT. On 48b / 4-level page-tables, the difference is very,
2593	 * very significant! We have to preallocate as GVT/vgpu does
2594	 * not like the page directory disappearing.
2595	 */
2596	err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
2597	if (err)
2598		goto err_ppgtt;
2599
2600	ggtt->alias = ppgtt;
2601
2602	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
2603	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
2604
2605	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
2606	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
2607
2608	return 0;
2609
2610err_ppgtt:
2611	i915_vm_put(&ppgtt->vm);
2612	return err;
2613}
2614
2615static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
2616{
2617	struct drm_i915_private *i915 = ggtt->vm.i915;
2618	struct i915_ppgtt *ppgtt;
2619
2620	mutex_lock(&i915->drm.struct_mutex);
2621
2622	ppgtt = fetch_and_zero(&ggtt->alias);
2623	if (!ppgtt)
2624		goto out;
2625
2626	i915_vm_put(&ppgtt->vm);
2627
2628	ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
2629	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
2630
2631out:
2632	mutex_unlock(&i915->drm.struct_mutex);
2633}
2634
2635static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
2636{
2637	u64 size;
2638	int ret;
2639
2640	if (!USES_GUC(ggtt->vm.i915))
2641		return 0;
2642
2643	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
2644	size = ggtt->vm.total - GUC_GGTT_TOP;
2645
2646	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
2647				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
2648				   PIN_NOEVICT);
2649	if (ret)
2650		DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
2651
2652	return ret;
2653}
2654
2655static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
2656{
2657	if (drm_mm_node_allocated(&ggtt->uc_fw))
2658		drm_mm_remove_node(&ggtt->uc_fw);
2659}
2660
2661static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
2662{
2663	ggtt_release_guc_top(ggtt);
2664	drm_mm_remove_node(&ggtt->error_capture);
2665}
2666
2667static int init_ggtt(struct i915_ggtt *ggtt)
2668{
2669	/* Let GEM manage all of the aperture.
2670	 *
2671	 * However, leave one page at the end still bound to the scratch page.
2672	 * There are a number of places where the hardware apparently prefetches
2673	 * past the end of the object, and we've seen multiple hangs with the
2674	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2675	 * aperture.  One page should be enough to keep any prefetching inside
2676	 * of the aperture.
2677	 */
2678	unsigned long hole_start, hole_end;
2679	struct drm_mm_node *entry;
2680	int ret;
2681
2682	/*
2683	 * GuC requires all resources that we're sharing with it to be placed in
2684	 * non-WOPCM memory. If GuC is not present or not in use we still need a
2685	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
2686	 * why.
2687	 */
2688	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
2689			       intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
2690
2691	ret = intel_vgt_balloon(ggtt);
2692	if (ret)
2693		return ret;
2694
2695	/* Reserve a mappable slot for our lockless error capture */
2696	ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
2697					  PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2698					  0, ggtt->mappable_end,
2699					  DRM_MM_INSERT_LOW);
2700	if (ret)
2701		return ret;
2702
2703	/*
2704	 * The upper portion of the GuC address space has a sizeable hole
2705	 * (several MB) that is inaccessible by GuC. Reserve this range within
2706	 * GGTT as it can comfortably hold GuC/HuC firmware images.
2707	 */
2708	ret = ggtt_reserve_guc_top(ggtt);
2709	if (ret)
2710		goto err;
2711
2712	/* Clear any non-preallocated blocks */
2713	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
2714		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2715			      hole_start, hole_end);
2716		ggtt->vm.clear_range(&ggtt->vm, hole_start,
2717				     hole_end - hole_start);
2718	}
2719
2720	/* And finally clear the reserved guard page */
2721	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
2722
2723	return 0;
2724
2725err:
2726	cleanup_init_ggtt(ggtt);
2727	return ret;
2728}
2729
2730int i915_init_ggtt(struct drm_i915_private *i915)
2731{
2732	int ret;
2733
2734	ret = init_ggtt(&i915->ggtt);
2735	if (ret)
2736		return ret;
2737
2738	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
2739		ret = init_aliasing_ppgtt(&i915->ggtt);
2740		if (ret)
2741			cleanup_init_ggtt(&i915->ggtt);
2742	}
2743
2744	return 0;
2745}
2746
2747static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
2748{
2749	struct drm_i915_private *i915 = ggtt->vm.i915;
2750	struct i915_vma *vma, *vn;
2751
2752	ggtt->vm.closed = true;
2753
2754	rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
2755	flush_workqueue(i915->wq);
2756
2757	mutex_lock(&i915->drm.struct_mutex);
2758
2759	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
2760		WARN_ON(i915_vma_unbind(vma));
2761
2762	if (drm_mm_node_allocated(&ggtt->error_capture))
2763		drm_mm_remove_node(&ggtt->error_capture);
2764
2765	ggtt_release_guc_top(ggtt);
2766
2767	if (drm_mm_initialized(&ggtt->vm.mm)) {
2768		intel_vgt_deballoon(ggtt);
2769		i915_address_space_fini(&ggtt->vm);
2770	}
2771
2772	ggtt->vm.cleanup(&ggtt->vm);
2773
2774	mutex_unlock(&i915->drm.struct_mutex);
2775
2776	arch_phys_wc_del(ggtt->mtrr);
2777	io_mapping_fini(&ggtt->iomap);
2778}
2779
2780/**
2781 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
2782 * @i915: i915 device
2783 */
2784void i915_ggtt_driver_release(struct drm_i915_private *i915)
2785{
2786	struct pagevec *pvec;
2787
2788	fini_aliasing_ppgtt(&i915->ggtt);
2789
2790	ggtt_cleanup_hw(&i915->ggtt);
2791
2792	pvec = &i915->mm.wc_stash.pvec;
2793	if (pvec->nr) {
2794		set_pages_array_wb(pvec->pages, pvec->nr);
2795		__pagevec_release(pvec);
2796	}
2797
2798	i915_gem_cleanup_stolen(i915);
2799}
2800
2801static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2802{
2803	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2804	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2805	return snb_gmch_ctl << 20;
2806}
2807
2808static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2809{
2810	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2811	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2812	if (bdw_gmch_ctl)
2813		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2814
2815#ifdef CONFIG_X86_32
2816	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
2817	if (bdw_gmch_ctl > 4)
2818		bdw_gmch_ctl = 4;
2819#endif
2820
2821	return bdw_gmch_ctl << 20;
2822}
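
/*
 * Worked example for a hypothetical register value: a GGMS field of 3
 * decodes above to 1 << 3 == 8, i.e. 8 << 20 == 8 MiB of PTE space. With
 * 8-byte gen8 PTEs and 4 KiB pages, gen8_gmch_probe() turns that into
 * (8 MiB / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE == 4 GiB of GGTT
 * address space.
 */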
2823
2824static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
2825{
2826	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2827	gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2828
2829	if (gmch_ctrl)
2830		return 1 << (20 + gmch_ctrl);
2831
2832	return 0;
2833}
2834
2835static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
2836{
2837	struct drm_i915_private *dev_priv = ggtt->vm.i915;
2838	struct pci_dev *pdev = dev_priv->drm.pdev;
2839	phys_addr_t phys_addr;
2840	int ret;
2841
2842	/* For Modern GENs the PTEs and register space are split in the BAR */
2843	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
2844
2845	/*
2846	 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
2847	 * will be dropped. For WC mappings in general we have 64 byte burst
2848	 * writes when the WC buffer is flushed, so we can't use it, but have to
2849	 * resort to an uncached mapping. The WC issue is easily caught by the
2850	 * readback check when writing GTT PTE entries.
2851	 */
2852	if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
2853		ggtt->gsm = ioremap_nocache(phys_addr, size);
2854	else
2855		ggtt->gsm = ioremap_wc(phys_addr, size);
2856	if (!ggtt->gsm) {
2857		DRM_ERROR("Failed to map the ggtt page table\n");
2858		return -ENOMEM;
2859	}
2860
2861	ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
2862	if (ret) {
2863		DRM_ERROR("Scratch setup failed\n");
2864		/* iounmap will also get called at remove, but meh */
2865		iounmap(ggtt->gsm);
2866		return ret;
2867	}
2868
2869	ggtt->vm.scratch[0].encode =
2870		ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
2871				    I915_CACHE_NONE, 0);
2872
2873	return 0;
2874}
2875
2876static void tgl_setup_private_ppat(struct drm_i915_private *dev_priv)
2877{
2878	/* TGL doesn't support LLC or AGE settings */
2879	I915_WRITE(GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
2880	I915_WRITE(GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
2881	I915_WRITE(GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
2882	I915_WRITE(GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
2883	I915_WRITE(GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
2884	I915_WRITE(GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
2885	I915_WRITE(GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
2886	I915_WRITE(GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
2887}
2888
2889static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
2890{
2891	I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
2892	I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
2893	I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
2894	I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
2895	I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
2896	I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
2897	I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
2898	I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2899}
2900
2901/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
2902 * bits. When using advanced contexts each context stores its own PAT, but
2903 * writing this data shouldn't be harmful even in those cases. */
2904static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
2905{
2906	u64 pat;
2907
2908	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
2909	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
2910	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |	/* for scanout with eLLC */
2911	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
2912	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
2913	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
2914	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
2915	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2916
2917	I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
2918	I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
2919}
2920
2921static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
2922{
2923	u64 pat;
2924
2925	/*
2926	 * Map WB on BDW to snooped on CHV.
2927	 *
2928	 * Only the snoop bit has meaning for CHV, the rest is
2929	 * ignored.
2930	 *
2931	 * The hardware will never snoop for certain types of accesses:
2932	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
2933	 * - PPGTT page tables
2934	 * - some other special cycles
2935	 *
2936	 * As with BDW, we also need to consider the following for GT accesses:
2937	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
2938	 * so RTL will always use the value corresponding to
2939	 * pat_sel = 000".
2940	 * Which means we must set the snoop bit in PAT entry 0
2941	 * in order to keep the global status page working.
2942	 */
2943
2944	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
2945	      GEN8_PPAT(1, 0) |
2946	      GEN8_PPAT(2, 0) |
2947	      GEN8_PPAT(3, 0) |
2948	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
2949	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
2950	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
2951	      GEN8_PPAT(7, CHV_PPAT_SNOOP);
2952
2953	I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
2954	I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
2955}
2956
2957static void gen6_gmch_remove(struct i915_address_space *vm)
2958{
2959	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2960
2961	iounmap(ggtt->gsm);
2962	cleanup_scratch_page(vm);
2963}
2964
2965static void setup_private_pat(struct drm_i915_private *dev_priv)
2966{
2967	GEM_BUG_ON(INTEL_GEN(dev_priv) < 8);
2968
2969	if (INTEL_GEN(dev_priv) >= 12)
2970		tgl_setup_private_ppat(dev_priv);
2971	else if (INTEL_GEN(dev_priv) >= 10)
2972		cnl_setup_private_ppat(dev_priv);
2973	else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
2974		chv_setup_private_ppat(dev_priv);
2975	else
2976		bdw_setup_private_ppat(dev_priv);
2977}
2978
2979static int gen8_gmch_probe(struct i915_ggtt *ggtt)
2980{
2981	struct drm_i915_private *dev_priv = ggtt->vm.i915;
2982	struct pci_dev *pdev = dev_priv->drm.pdev;
2983	unsigned int size;
2984	u16 snb_gmch_ctl;
2985	int err;
2986
2987	/* TODO: We're not aware of mappable constraints on gen8 yet */
2988	ggtt->gmadr =
2989		(struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
2990						 pci_resource_len(pdev, 2));
2991	ggtt->mappable_end = resource_size(&ggtt->gmadr);
2992
2993	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
2994	if (!err)
2995		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
2996	if (err)
2997		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
2998
2999	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3000	if (IS_CHERRYVIEW(dev_priv))
3001		size = chv_get_total_gtt_size(snb_gmch_ctl);
3002	else
3003		size = gen8_get_total_gtt_size(snb_gmch_ctl);
3004
3005	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
3006	ggtt->vm.cleanup = gen6_gmch_remove;
3007	ggtt->vm.insert_page = gen8_ggtt_insert_page;
3008	ggtt->vm.clear_range = nop_clear_range;
3009	if (intel_scanout_needs_vtd_wa(dev_priv))
3010		ggtt->vm.clear_range = gen8_ggtt_clear_range;
3011
3012	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
3013
3014	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
3015	if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
3016	    IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
3017		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
3018		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
3019		if (ggtt->vm.clear_range != nop_clear_range)
3020			ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3021	}
3022
3023	ggtt->invalidate = gen6_ggtt_invalidate;
3024
3025	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3026	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3027	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3028	ggtt->vm.vma_ops.clear_pages = clear_pages;
3029
3030	ggtt->vm.pte_encode = gen8_pte_encode;
3031
3032	setup_private_pat(dev_priv);
3033
3034	return ggtt_probe_common(ggtt, size);
3035}
3036
3037static int gen6_gmch_probe(struct i915_ggtt *ggtt)
3038{
3039	struct drm_i915_private *dev_priv = ggtt->vm.i915;
3040	struct pci_dev *pdev = dev_priv->drm.pdev;
3041	unsigned int size;
3042	u16 snb_gmch_ctl;
3043	int err;
3044
3045	ggtt->gmadr =
3046		(struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3047						 pci_resource_len(pdev, 2));
3048	ggtt->mappable_end = resource_size(&ggtt->gmadr);
3049
3050	/* 64/512MB is the current min/max we actually know of, but this is just
3051	 * a coarse sanity check.
3052	 */
3053	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
3054		DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
3055		return -ENXIO;
3056	}
3057
3058	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
3059	if (!err)
3060		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
3061	if (err)
3062		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3063	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3064
3065	size = gen6_get_total_gtt_size(snb_gmch_ctl);
3066	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
3067
3068	ggtt->vm.clear_range = nop_clear_range;
3069	if (!HAS_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
3070		ggtt->vm.clear_range = gen6_ggtt_clear_range;
3071	ggtt->vm.insert_page = gen6_ggtt_insert_page;
3072	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
3073	ggtt->vm.cleanup = gen6_gmch_remove;
3074
3075	ggtt->invalidate = gen6_ggtt_invalidate;
3076
3077	if (HAS_EDRAM(dev_priv))
3078		ggtt->vm.pte_encode = iris_pte_encode;
3079	else if (IS_HASWELL(dev_priv))
3080		ggtt->vm.pte_encode = hsw_pte_encode;
3081	else if (IS_VALLEYVIEW(dev_priv))
3082		ggtt->vm.pte_encode = byt_pte_encode;
3083	else if (INTEL_GEN(dev_priv) >= 7)
3084		ggtt->vm.pte_encode = ivb_pte_encode;
3085	else
3086		ggtt->vm.pte_encode = snb_pte_encode;
3087
3088	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3089	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3090	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3091	ggtt->vm.vma_ops.clear_pages = clear_pages;
3092
3093	return ggtt_probe_common(ggtt, size);
3094}
3095
3096static void i915_gmch_remove(struct i915_address_space *vm)
3097{
3098	intel_gmch_remove();
3099}
3100
3101static int i915_gmch_probe(struct i915_ggtt *ggtt)
3102{
3103	struct drm_i915_private *dev_priv = ggtt->vm.i915;
3104	phys_addr_t gmadr_base;
3105	int ret;
3106
3107	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
3108	if (!ret) {
3109		DRM_ERROR("failed to set up gmch\n");
3110		return -EIO;
3111	}
3112
3113	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
3114
3115	ggtt->gmadr =
3116		(struct resource) DEFINE_RES_MEM(gmadr_base,
3117						 ggtt->mappable_end);
3118
3119	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
3120	ggtt->vm.insert_page = i915_ggtt_insert_page;
3121	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
3122	ggtt->vm.clear_range = i915_ggtt_clear_range;
3123	ggtt->vm.cleanup = i915_gmch_remove;
3124
3125	ggtt->invalidate = gmch_ggtt_invalidate;
3126
3127	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3128	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3129	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3130	ggtt->vm.vma_ops.clear_pages = clear_pages;
3131
3132	if (unlikely(ggtt->do_idle_maps))
3133		dev_notice(dev_priv->drm.dev,
3134			   "Applying Ironlake quirks for intel_iommu\n");
3135
3136	return 0;
3137}
3138
3139static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
3140{
3141	struct drm_i915_private *i915 = gt->i915;
3142	int ret;
3143
3144	ggtt->vm.gt = gt;
3145	ggtt->vm.i915 = i915;
3146	ggtt->vm.dma = &i915->drm.pdev->dev;
3147
3148	if (INTEL_GEN(i915) <= 5)
3149		ret = i915_gmch_probe(ggtt);
3150	else if (INTEL_GEN(i915) < 8)
3151		ret = gen6_gmch_probe(ggtt);
3152	else
3153		ret = gen8_gmch_probe(ggtt);
3154	if (ret)
3155		return ret;
3156
3157	if ((ggtt->vm.total - 1) >> 32) {
3158		DRM_ERROR("We never expected a Global GTT with more than 32bits"
3159			  " of address space! Found %lldM!\n",
3160			  ggtt->vm.total >> 20);
3161		ggtt->vm.total = 1ULL << 32;
3162		ggtt->mappable_end =
3163			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
3164	}
3165
3166	if (ggtt->mappable_end > ggtt->vm.total) {
3167		DRM_ERROR("mappable aperture extends past end of GGTT,"
3168			  " aperture=%pa, total=%llx\n",
3169			  &ggtt->mappable_end, ggtt->vm.total);
3170		ggtt->mappable_end = ggtt->vm.total;
3171	}
3172
3173	/* GMADR is the PCI mmio aperture into the global GTT. */
3174	DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
3175	DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
3176	DRM_DEBUG_DRIVER("DSM size = %lluM\n",
3177			 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
3178
3179	return 0;
3180}
3181
3182/**
3183 * i915_ggtt_probe_hw - Probe GGTT hardware location
3184 * @i915: i915 device
3185 */
3186int i915_ggtt_probe_hw(struct drm_i915_private *i915)
3187{
3188	int ret;
3189
3190	ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
3191	if (ret)
3192		return ret;
3193
3194	if (intel_vtd_active())
3195		dev_info(i915->drm.dev, "VT-d active for gfx access\n");
3196
3197	return 0;
3198}
3199
3200static int ggtt_init_hw(struct i915_ggtt *ggtt)
3201{
3202	struct drm_i915_private *i915 = ggtt->vm.i915;
3203	int ret = 0;
3204
3205	mutex_lock(&i915->drm.struct_mutex);
3206
3207	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
3208
3209	ggtt->vm.is_ggtt = true;
3210
3211	/* Only VLV supports read-only GGTT mappings */
3212	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
3213
3214	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
3215		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
3216
3217	if (!io_mapping_init_wc(&ggtt->iomap,
3218				ggtt->gmadr.start,
3219				ggtt->mappable_end)) {
3220		ggtt->vm.cleanup(&ggtt->vm);
3221		ret = -EIO;
3222		goto out;
3223	}
3224
3225	ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
3226
3227	i915_ggtt_init_fences(ggtt);
3228
3229out:
3230	mutex_unlock(&i915->drm.struct_mutex);
3231
3232	return ret;
3233}
3234
3235/**
3236 * i915_ggtt_init_hw - Initialize GGTT hardware
3237 * @dev_priv: i915 device
3238 */
3239int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
3240{
3241	int ret;
3242
3243	stash_init(&dev_priv->mm.wc_stash);
3244
3245	/* Note that we use page colouring to enforce a guard page at the
3246	 * end of the address space. This is required as the CS may prefetch
3247	 * beyond the end of the batch buffer, across the page boundary,
3248	 * and beyond the end of the GTT if we do not provide a guard.
3249	 */
3250	ret = ggtt_init_hw(&dev_priv->ggtt);
3251	if (ret)
3252		return ret;
3253
3254	/*
3255	 * Initialise stolen early so that we may reserve preallocated
3256	 * objects for the BIOS to KMS transition.
3257	 */
3258	ret = i915_gem_init_stolen(dev_priv);
3259	if (ret)
3260		goto out_gtt_cleanup;
3261
3262	return 0;
3263
3264out_gtt_cleanup:
3265	dev_priv->ggtt.vm.cleanup(&dev_priv->ggtt.vm);
3266	return ret;
3267}
3268
3269int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
3270{
3271	if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
3272		return -EIO;
3273
3274	return 0;
3275}
3276
3277void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
3278{
3279	GEM_BUG_ON(ggtt->invalidate != gen6_ggtt_invalidate);
3280
3281	ggtt->invalidate = guc_ggtt_invalidate;
3282
3283	ggtt->invalidate(ggtt);
3284}
3285
3286void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
3287{
3288	/* XXX Temporary pardon for error unload */
3289	if (ggtt->invalidate == gen6_ggtt_invalidate)
3290		return;
3291
3292	/* We should only be called after i915_ggtt_enable_guc() */
3293	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
3294
3295	ggtt->invalidate = gen6_ggtt_invalidate;
3296
3297	ggtt->invalidate(ggtt);
3298}
3299
3300static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
3301{
3302	struct i915_vma *vma, *vn;
3303	bool flush = false;
3304
3305	intel_gt_check_and_clear_faults(ggtt->vm.gt);
3306
3307	mutex_lock(&ggtt->vm.mutex);
3308
3309	/* First fill our portion of the GTT with scratch pages */
3310	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
3311	ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
3312
3313	/* clflush objects bound into the GGTT and rebind them. */
3314	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
3315		struct drm_i915_gem_object *obj = vma->obj;
3316
3317		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
3318			continue;
3319
3320		mutex_unlock(&ggtt->vm.mutex);
3321
3322		if (!i915_vma_unbind(vma))
3323			goto lock;
3324
3325		WARN_ON(i915_vma_bind(vma,
3326				      obj ? obj->cache_level : 0,
3327				      PIN_UPDATE));
3328		if (obj) { /* only used during resume => exclusive access */
3329			flush |= fetch_and_zero(&obj->write_domain);
3330			obj->read_domains |= I915_GEM_DOMAIN_GTT;
3331		}
3332
3333lock:
3334		mutex_lock(&ggtt->vm.mutex);
3335	}
3336
3337	ggtt->vm.closed = false;
3338	ggtt->invalidate(ggtt);
3339
3340	mutex_unlock(&ggtt->vm.mutex);
3341
3342	if (flush)
3343		wbinvd_on_all_cpus();
3344}
3345
3346void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
3347{
3348	ggtt_restore_mappings(&i915->ggtt);
3349
3350	if (INTEL_GEN(i915) >= 8)
3351		setup_private_pat(i915);
3352}
3353
3354static struct scatterlist *
3355rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
3356	     unsigned int width, unsigned int height,
3357	     unsigned int stride,
3358	     struct sg_table *st, struct scatterlist *sg)
3359{
3360	unsigned int column, row;
3361	unsigned int src_idx;
3362
3363	for (column = 0; column < width; column++) {
3364		src_idx = stride * (height - 1) + column + offset;
3365		for (row = 0; row < height; row++) {
3366			st->nents++;
3367			/* We don't need the pages, but need to initialize
3368			 * the entries so the sg list can be happily traversed.
3369			 * All we need are the DMA addresses.
3370			 */
3371			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
3372			sg_dma_address(sg) =
3373				i915_gem_object_get_dma_address(obj, src_idx);
3374			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
3375			sg = sg_next(sg);
3376			src_idx -= stride;
3377		}
3378	}
3379
3380	return sg;
3381}
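
/*
 * A minimal walk-through of the loop above, assuming a hypothetical plane
 * of width 2, height 3 and stride 2 tiles with offset 0: the source pages
 * are emitted in the order 4, 2, 0, 5, 3, 1, i.e. each column is copied
 * from the bottom row upwards before moving on to the next column, which
 * is what produces the rotated page ordering.
 */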
3382
3383static noinline struct sg_table *
3384intel_rotate_pages(struct intel_rotation_info *rot_info,
3385		   struct drm_i915_gem_object *obj)
3386{
3387	unsigned int size = intel_rotation_info_size(rot_info);
3388	struct sg_table *st;
3389	struct scatterlist *sg;
3390	int ret = -ENOMEM;
3391	int i;
3392
3393	/* Allocate target SG list. */
3394	st = kmalloc(sizeof(*st), GFP_KERNEL);
3395	if (!st)
3396		goto err_st_alloc;
3397
3398	ret = sg_alloc_table(st, size, GFP_KERNEL);
3399	if (ret)
3400		goto err_sg_alloc;
3401
3402	st->nents = 0;
3403	sg = st->sgl;
3404
3405	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3406		sg = rotate_pages(obj, rot_info->plane[i].offset,
3407				  rot_info->plane[i].width, rot_info->plane[i].height,
3408				  rot_info->plane[i].stride, st, sg);
3409	}
3410
3411	return st;
3412
3413err_sg_alloc:
3414	kfree(st);
3415err_st_alloc:
3416
3417	DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3418			 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3419
3420	return ERR_PTR(ret);
3421}
3422
3423static struct scatterlist *
3424remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
3425	    unsigned int width, unsigned int height,
3426	    unsigned int stride,
3427	    struct sg_table *st, struct scatterlist *sg)
3428{
3429	unsigned int row;
3430
3431	for (row = 0; row < height; row++) {
3432		unsigned int left = width * I915_GTT_PAGE_SIZE;
3433
3434		while (left) {
3435			dma_addr_t addr;
3436			unsigned int length;
3437
3438			/* We don't need the pages, but need to initialize
3439			 * the entries so the sg list can be happily traversed.
3440			 * All we need are the DMA addresses.
3441			 */
3442
3443			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
3444
3445			length = min(left, length);
3446
3447			st->nents++;
3448
3449			sg_set_page(sg, NULL, length, 0);
3450			sg_dma_address(sg) = addr;
3451			sg_dma_len(sg) = length;
3452			sg = sg_next(sg);
3453
3454			offset += length / I915_GTT_PAGE_SIZE;
3455			left -= length;
3456		}
3457
3458		offset += stride - width;
3459	}
3460
3461	return sg;
3462}
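
/*
 * A minimal walk-through, assuming a hypothetical 2x2 page view with a
 * stride of 4 pages and offset 0: the first row copies the DMA addresses
 * of pages 0-1, the "offset += stride - width" step then skips pages 2-3,
 * and the second row continues with pages 4-5, linearising a strided
 * sub-rectangle of the object's backing pages.
 */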
3463
3464static noinline struct sg_table *
3465intel_remap_pages(struct intel_remapped_info *rem_info,
3466		  struct drm_i915_gem_object *obj)
3467{
3468	unsigned int size = intel_remapped_info_size(rem_info);
3469	struct sg_table *st;
3470	struct scatterlist *sg;
3471	int ret = -ENOMEM;
3472	int i;
3473
3474	/* Allocate target SG list. */
3475	st = kmalloc(sizeof(*st), GFP_KERNEL);
3476	if (!st)
3477		goto err_st_alloc;
3478
3479	ret = sg_alloc_table(st, size, GFP_KERNEL);
3480	if (ret)
3481		goto err_sg_alloc;
3482
3483	st->nents = 0;
3484	sg = st->sgl;
3485
3486	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
3487		sg = remap_pages(obj, rem_info->plane[i].offset,
3488				 rem_info->plane[i].width, rem_info->plane[i].height,
3489				 rem_info->plane[i].stride, st, sg);
3490	}
3491
3492	i915_sg_trim(st);
3493
3494	return st;
3495
3496err_sg_alloc:
3497	kfree(st);
3498err_st_alloc:
3499
3500	DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3501			 obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
3502
3503	return ERR_PTR(ret);
3504}
3505
3506static noinline struct sg_table *
3507intel_partial_pages(const struct i915_ggtt_view *view,
3508		    struct drm_i915_gem_object *obj)
3509{
3510	struct sg_table *st;
3511	struct scatterlist *sg, *iter;
3512	unsigned int count = view->partial.size;
3513	unsigned int offset;
3514	int ret = -ENOMEM;
3515
3516	st = kmalloc(sizeof(*st), GFP_KERNEL);
3517	if (!st)
3518		goto err_st_alloc;
3519
3520	ret = sg_alloc_table(st, count, GFP_KERNEL);
3521	if (ret)
3522		goto err_sg_alloc;
3523
3524	iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
3525	GEM_BUG_ON(!iter);
3526
3527	sg = st->sgl;
3528	st->nents = 0;
3529	do {
3530		unsigned int len;
3531
3532		len = min(iter->length - (offset << PAGE_SHIFT),
3533			  count << PAGE_SHIFT);
3534		sg_set_page(sg, NULL, len, 0);
3535		sg_dma_address(sg) =
3536			sg_dma_address(iter) + (offset << PAGE_SHIFT);
3537		sg_dma_len(sg) = len;
3538
3539		st->nents++;
3540		count -= len >> PAGE_SHIFT;
3541		if (count == 0) {
3542			sg_mark_end(sg);
3543			i915_sg_trim(st); /* Drop any unused tail entries. */
3544
3545			return st;
3546		}
3547
3548		sg = __sg_next(sg);
3549		iter = __sg_next(iter);
3550		offset = 0;
3551	} while (1);
3552
3553err_sg_alloc:
3554	kfree(st);
3555err_st_alloc:
3556	return ERR_PTR(ret);
3557}
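
/*
 * Sketch of the result for a hypothetical view with partial.offset == 2
 * and partial.size == 3: the table built above maps object pages [2, 5),
 * merged into as few scatterlist entries as the source sg chunks allow and
 * trimmed of any unused tail entries by i915_sg_trim().
 */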
3558
3559static int
3560i915_get_ggtt_vma_pages(struct i915_vma *vma)
3561{
3562	int ret;
3563
3564	/* The vma->pages are only valid within the lifespan of the borrowed
3565	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3566	 * must be the vma->pages. A simple rule is that vma->pages must only
3567	 * be accessed when the obj->mm.pages are pinned.
3568	 */
3569	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3570
3571	switch (vma->ggtt_view.type) {
3572	default:
3573		GEM_BUG_ON(vma->ggtt_view.type);
3574		/* fall through */
3575	case I915_GGTT_VIEW_NORMAL:
3576		vma->pages = vma->obj->mm.pages;
3577		return 0;
3578
3579	case I915_GGTT_VIEW_ROTATED:
3580		vma->pages =
3581			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3582		break;
3583
3584	case I915_GGTT_VIEW_REMAPPED:
3585		vma->pages =
3586			intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
3587		break;
3588
3589	case I915_GGTT_VIEW_PARTIAL:
3590		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
3591		break;
3592	}
3593
3594	ret = 0;
3595	if (IS_ERR(vma->pages)) {
3596		ret = PTR_ERR(vma->pages);
3597		vma->pages = NULL;
3598		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3599			  vma->ggtt_view.type, ret);
3600	}
3601	return ret;
3602}
3603
3604/**
3605 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
3606 * @vm: the &struct i915_address_space
3607 * @node: the &struct drm_mm_node (typically i915_vma.node)
3608 * @size: how much space to allocate inside the GTT,
3609 *        must be #I915_GTT_PAGE_SIZE aligned
3610 * @offset: where to insert inside the GTT,
3611 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3612 *          (@offset + @size) must fit within the address space
3613 * @color: color to apply to node, if this node is not from a VMA,
3614 *         color must be #I915_COLOR_UNEVICTABLE
3615 * @flags: control search and eviction behaviour
3616 *
3617 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3618 * the address space (using @size and @color). If the @node does not fit, it
3619 * tries to evict any overlapping nodes from the GTT, including any
3620 * neighbouring nodes if the colors do not match (to ensure guard pages between
3621 * differing domains). See i915_gem_evict_for_node() for the gory details
3622 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3623 * evicting active overlapping objects, and any overlapping node that is pinned
3624 * or marked as unevictable will also result in failure.
3625 *
3626 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3627 * asked to wait for eviction and interrupted.
3628 */
3629int i915_gem_gtt_reserve(struct i915_address_space *vm,
3630			 struct drm_mm_node *node,
3631			 u64 size, u64 offset, unsigned long color,
3632			 unsigned int flags)
3633{
3634	int err;
3635
3636	GEM_BUG_ON(!size);
3637	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3638	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3639	GEM_BUG_ON(range_overflows(offset, size, vm->total));
3640	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
3641	GEM_BUG_ON(drm_mm_node_allocated(node));
3642
3643	node->size = size;
3644	node->start = offset;
3645	node->color = color;
3646
3647	err = drm_mm_reserve_node(&vm->mm, node);
3648	if (err != -ENOSPC)
3649		return err;
3650
3651	if (flags & PIN_NOEVICT)
3652		return -ENOSPC;
3653
3654	err = i915_gem_evict_for_node(vm, node, flags);
3655	if (err == 0)
3656		err = drm_mm_reserve_node(&vm->mm, node);
3657
3658	return err;
3659}
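
/*
 * Minimal usage sketch with hypothetical values (i915 being a
 * struct drm_i915_private pointer): reserve one page at a fixed 1 MiB
 * GGTT offset, allowing eviction of whatever overlaps it (flags == 0):
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_reserve(&i915->ggtt.vm, &node, I915_GTT_PAGE_SIZE,
 *				   SZ_1M, I915_COLOR_UNEVICTABLE, 0);
 *	if (err)
 *		return err;
 */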
3660
3661static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3662{
3663	u64 range, addr;
3664
3665	GEM_BUG_ON(range_overflows(start, len, end));
3666	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3667
3668	range = round_down(end - len, align) - round_up(start, align);
3669	if (range) {
3670		if (sizeof(unsigned long) == sizeof(u64)) {
3671			addr = get_random_long();
3672		} else {
3673			addr = get_random_int();
3674			if (range > U32_MAX) {
3675				addr <<= 32;
3676				addr |= get_random_int();
3677			}
3678		}
3679		div64_u64_rem(addr, range, &addr);
3680		start += addr;
3681	}
3682
3683	return round_up(start, align);
3684}
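
/*
 * Worked example with hypothetical bounds: for start == 0, end == 1 MiB,
 * len == 64 KiB and align == 4 KiB, the usable range computed above is
 * round_down(1 MiB - 64 KiB, 4 KiB) - round_up(0, 4 KiB) == 960 KiB, so
 * the random remainder falls in [0, 960 KiB) and the rounded-up result is
 * a 4 KiB aligned offset whose [offset, offset + len) span still fits
 * below end.
 */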
3685
3686/**
3687 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
3688 * @vm: the &struct i915_address_space
3689 * @node: the &struct drm_mm_node (typically i915_vma.node)
3690 * @size: how much space to allocate inside the GTT,
3691 *        must be #I915_GTT_PAGE_SIZE aligned
3692 * @alignment: required alignment of starting offset, may be 0 but
3693 *             if specified, this must be a power-of-two and at least
3694 *             #I915_GTT_MIN_ALIGNMENT
3695 * @color: color to apply to node
3696 * @start: start of any range restriction inside GTT (0 for all),
3697 *         must be #I915_GTT_PAGE_SIZE aligned
3698 * @end: end of any range restriction inside GTT (U64_MAX for all),
3699 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3700 * @flags: control search and eviction behaviour
3701 *
3702 * i915_gem_gtt_insert() first searches for an available hole into which
3703 * it can insert the node. The hole address is aligned to @alignment and
3704 * its @size must then fit entirely within the [@start, @end] bounds. The
3705 * nodes on either side of the hole must match @color, or else a guard page
3706 * will be inserted between the two nodes (or the node evicted). If no
3707 * suitable hole is found, a randomly selected victim is first tested
3708 * for eviction; failing that, the LRU list of objects within the GTT
3709 * is scanned to find the first set of replacement nodes to create the hole.
3710 * Those old overlapping nodes are evicted from the GTT (and so must be
3711 * rebound before any future use). Any node that is currently pinned cannot
3712 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
3713 * active and #PIN_NONBLOCK is specified, that node is also skipped when
3714 * searching for an eviction candidate. See i915_gem_evict_something() for
3715 * the gory details on the eviction algorithm.
3716 *
3717 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3718 * asked to wait for eviction and interrupted.
3719 */
3720int i915_gem_gtt_insert(struct i915_address_space *vm,
3721			struct drm_mm_node *node,
3722			u64 size, u64 alignment, unsigned long color,
3723			u64 start, u64 end, unsigned int flags)
3724{
3725	enum drm_mm_insert_mode mode;
3726	u64 offset;
3727	int err;
3728
3729	lockdep_assert_held(&vm->i915->drm.struct_mutex);
3730	GEM_BUG_ON(!size);
3731	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3732	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
3733	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
3734	GEM_BUG_ON(start >= end);
3735	GEM_BUG_ON(start > 0  && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
3736	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
3737	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
3738	GEM_BUG_ON(drm_mm_node_allocated(node));
3739
3740	if (unlikely(range_overflows(start, size, end)))
3741		return -ENOSPC;
3742
3743	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
3744		return -ENOSPC;
3745
3746	mode = DRM_MM_INSERT_BEST;
3747	if (flags & PIN_HIGH)
3748		mode = DRM_MM_INSERT_HIGHEST;
3749	if (flags & PIN_MAPPABLE)
3750		mode = DRM_MM_INSERT_LOW;
3751
3752	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3753	 * so we know that we always have a minimum alignment of 4096.
3754	 * The drm_mm range manager is optimised to return results
3755	 * with zero alignment, so where possible use the optimal
3756	 * path.
3757	 */
3758	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
3759	if (alignment <= I915_GTT_MIN_ALIGNMENT)
3760		alignment = 0;
3761
3762	err = drm_mm_insert_node_in_range(&vm->mm, node,
3763					  size, alignment, color,
3764					  start, end, mode);
3765	if (err != -ENOSPC)
3766		return err;
3767
3768	if (mode & DRM_MM_INSERT_ONCE) {
3769		err = drm_mm_insert_node_in_range(&vm->mm, node,
3770						  size, alignment, color,
3771						  start, end,
3772						  DRM_MM_INSERT_BEST);
3773		if (err != -ENOSPC)
3774			return err;
3775	}
3776
3777	if (flags & PIN_NOEVICT)
3778		return -ENOSPC;
3779
3780	/*
3781	 * No free space, pick a slot at random.
3782	 *
3783	 * There is a pathological case here using a GTT shared between
3784	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
3785	 *
3786	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
3787	 *         (64k objects)             (448k objects)
3788	 *
3789	 * Now imagine that the eviction LRU is ordered top-down (just because
3790	 * pathology meets real life), and that we need to evict an object to
3791	 * make room inside the aperture. The eviction scan then has to walk
3792	 * the 448k list before it finds one within range. And now imagine that
3793	 * it has to search for a new hole between every byte inside the memcpy,
3794	 * for several simultaneous clients.
3795	 *
3796	 * On a full-ppgtt system, if we have run out of available space, there
3797	 * will be lots and lots of objects in the eviction list! Again,
3798	 * searching that LRU list may be slow if we are also applying any
3799	 * range restrictions (e.g. restriction to low 4GiB) and so, for
3800	 * simplicity and similarity between the different GTTs, try the single
3801	 * random replacement first.
3802	 */
3803	offset = random_offset(start, end,
3804			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
3805	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
3806	if (err != -ENOSPC)
3807		return err;
3808
3809	if (flags & PIN_NOSEARCH)
3810		return -ENOSPC;
3811
3812	/* Randomly selected placement is pinned, do a search */
3813	err = i915_gem_evict_something(vm, size, alignment, color,
3814				       start, end, flags);
3815	if (err)
3816		return err;
3817
3818	return drm_mm_insert_node_in_range(&vm->mm, node,
3819					   size, alignment, color,
3820					   start, end, DRM_MM_INSERT_EVICT);
3821}
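
/*
 * A minimal usage sketch for i915_gem_gtt_insert(), added for illustration
 * only and not part of the driver: it shows a caller binding a node anywhere
 * inside a restricted [start, end] window of the GTT. The function name and
 * the flag choice are assumptions for this example; the caller is assumed to
 * hold struct_mutex, as the lockdep assertion above requires, and to pass a
 * size, start and end that are already GTT page aligned.
 */
static int example_insert_in_range(struct i915_address_space *vm,
				   struct drm_mm_node *node,
				   u64 size, unsigned long color,
				   u64 start, u64 end)
{
	/* An alignment of 0 requests the minimum (page) alignment and lets
	 * drm_mm take its optimised zero-alignment path. PIN_HIGH prefers
	 * the top of the range, while PIN_NONBLOCK avoids waiting on active
	 * objects if eviction is needed to make room.
	 */
	return i915_gem_gtt_insert(vm, node, size, 0, color,
				   start, end,
				   PIN_HIGH | PIN_NONBLOCK);
}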
3822
3823#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3824#include "selftests/mock_gtt.c"
3825#include "selftests/i915_gem_gtt.c"
3826#endif