/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_pci.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that the system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (GRAPHICS_VER(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node,
						    size, alignment,
						    I915_GEM_STOLEN_BIAS,
						    U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}
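
/*
 * A minimal usage sketch for the three helpers above; the embedding caller
 * and the sizes are illustrative, not taken from this file:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_stolen_insert_node(i915, &node, SZ_64K, SZ_4K);
 *	if (err)
 *		return err;
 *
 * On success node.start holds a DSM-relative offset at least
 * I915_GEM_STOLEN_BIAS from the bottom of stolen, and
 * i915_gem_stolen_remove_node() returns the range to the allocator.
 */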

static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm)
{
	return (dsm->start != 0 || HAS_LMEMBAR_SMEM_STOLEN(i915)) && dsm->end > dsm->start;
}

static int adjust_stolen(struct drm_i915_private *i915,
			 struct resource *dsm)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	/*
	 * Make sure we don't clobber the GTT if it's within stolen memory
	 *
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */
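	/*
	 * Worked example (made-up numbers): with DSM at
	 * [0x7f800000, 0x7fffffff] and a 512KiB GTT occupying its top,
	 * stolen[0] below is trimmed to end where the GTT starts while
	 * stolen[1] collapses to (almost) nothing, so the larger chunk
	 * below the GTT becomes the adjusted DSM.
	 */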
	if (GRAPHICS_VER(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (GRAPHICS_VER(i915) == 4)
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res = DEFINE_RES_MEM(ggtt_start, ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	return 0;
}

static int request_smem_stolen(struct drm_i915_private *i915,
			       struct resource *dsm)
{
	struct resource *r;

	/*
	 * With stolen lmem, we don't need to request system memory for the
	 * address range since it's local to the gpu.
	 *
	 * Starting with MTL, on IGFX devices the stolen memory is exposed via
	 * LMEMBAR and shall be considered similar to stolen lmem.
	 */
	if (HAS_LMEM(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
		return 0;

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0. There are also
		 * BIOSes with an off-by-one on the other end, which is why
		 * the retry also trims the size by 2, covering
		 * [start + 1, end - 1].
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && GRAPHICS_VER(i915) != 3) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}
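
/*
 * For instance (illustrative numbers): with stolen_top at 0x80000000 and
 * the 1MiB reserved size decoded above, vlv_get_stolen_reserved() reports
 * a reserved base of 0x7ff00000.
 */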

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	/* Wa_14019821291 */
	if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
		/*
		 * This workaround is primarily implemented by the BIOS.  We
		 * just need to figure out whether the BIOS has applied the
		 * workaround (meaning the programmed address falls within
		 * the DSM) and, if so, reserve that part of the DSM to
		 * prevent accidental reuse.  The DSM location should be just
		 * below the WOPCM.
		 */
		u64 gscpsmi_base = intel_uncore_read64_2x32(uncore,
							    MTL_GSCPSMI_BASEADDR_LSB,
							    MTL_GSCPSMI_BASEADDR_MSB);
		if (gscpsmi_base >= i915->dsm.stolen.start &&
		    gscpsmi_base < i915->dsm.stolen.end) {
			*base = gscpsmi_base;
			*size = i915->dsm.stolen.end - gscpsmi_base;
			return;
		}
	}

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}

	if (HAS_LMEMBAR_SMEM_STOLEN(i915))
		/* the base is initialized to stolen top so subtract size to get base */
		*base -= *size;
	else
		*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
}
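
/*
 * On LMEMBAR-stolen parts the caller below seeds *base with stolen_top, so
 * e.g. an 8MiB reserved size ends up covering the top 8MiB of the DSM
 * (base = stolen_top - 8MiB).
 */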

/*
 * Initialize i915->dsm.reserved to contain the reserved space within the
 * Data Stolen Memory. This is a range at the top of DSM that is reserved,
 * not to be used by the driver, so it must be excluded from the region
 * passed to the allocator later. In the spec this is also known as WOPCM.
 *
 * Our expectation is that the reserved space is at the top of the stolen
 * region, as has been the case for every platform, and *never* at the
 * bottom, so the calculation here can be simplified.
 */
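/*
 * For example (illustrative numbers only): with a 64MiB DSM and an 8MiB
 * WOPCM reserved at its top, i915->dsm.reserved covers the final 8MiB and
 * the allocator below is handed only the first 56MiB.
 */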
static int init_reserved_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_size;
	int ret = 0;

	stolen_top = i915->dsm.stolen.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	if (GRAPHICS_VER(i915) >= 11) {
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 8) {
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 7) {
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 6) {
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) {
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	}

	/* No reserved stolen */
	if (reserved_base == stolen_top)
		goto bail_out;

	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		ret = -EINVAL;
		goto bail_out;
	}

	i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm.stolen, &i915->dsm.reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm.reserved, &i915->dsm.stolen);
		ret = -EINVAL;
		goto bail_out;
	}

	return 0;

bail_out:
	i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, 0);

	return ret;
}

static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return -ENOSPC;
	}

	if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return -ENOSPC;
	}

	if (adjust_stolen(i915, &mem->region))
		return -ENOSPC;

	if (request_smem_stolen(i915, &mem->region))
		return -ENOSPC;

	i915->dsm.stolen = mem->region;

	if (init_reserved_stolen(i915))
		return -ENOSPC;

	/* Exclude the reserved region from driver use */
	mem->region.end = i915->dsm.reserved.start - 1;
	mem->io_size = min(mem->io_size, resource_size(&mem->region));

	i915->dsm.usable_size = resource_size(&mem->region);

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm.stolen) >> 10,
		(u64)i915->dsm.usable_size >> 10);

	if (i915->dsm.usable_size == 0)
		return -ENOSPC;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->dsm.usable_size);
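
	/*
	 * Note that the allocator initialized above hands out DSM-relative
	 * offsets in [0, usable_size); i915_gem_stolen_node_address() at the
	 * bottom of this file turns them into absolute addresses by adding
	 * the DSM base.
	 */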

	/*
	 * Access to stolen lmem beyond a certain size on the MTL A0 stepping
	 * would crash the machine. Disable stolen lmem for userspace access
	 * by setting usable_size to zero.
	 */
	if (IS_METEORLAKE(i915) && INTEL_REVID(i915) == 0x0)
		i915->dsm.usable_size = 0;

	return 0;
}

static void dbg_poison(struct i915_ggtt *ggtt,
		       dma_addr_t addr, resource_size_t size,
		       u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return;

	if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
		return; /* beware stop_machine() inversion */

	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	mutex_lock(&ggtt->error_mutex);
	while (size) {
		void __iomem *s;

		ggtt->vm.insert_page(&ggtt->vm, addr,
				     ggtt->error_capture.start,
				     i915_gem_get_pat_index(ggtt->vm.i915,
							    I915_CACHE_NONE),
				     0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);
		memset_io(s, x, PAGE_SIZE);
		io_mapping_unmap(s);

		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
	mutex_unlock(&ggtt->error_mutex);
#endif
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm.stolen)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.stolen.start + offset;
	sg_dma_len(sg) = size;

	return st;
}
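
/*
 * From a consumer's point of view, the single-entry table above simply
 * reports sg_dma_address() == DSM start + offset and sg_dma_len() == size,
 * so the stolen range is handled like any other physically contiguous,
 * dma-mapped buffer even though no struct page backs it.
 */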

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_INUSE);

	__i915_gem_object_set_pages(obj, pages);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	/* Should only be called from i915_gem_object_release_stolen() */

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_FREE);

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);
	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);

	i915_gem_object_release_memory_region(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
					   struct drm_i915_gem_object *obj,
					   struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	unsigned int cache_level;
	unsigned int flags;
	int err;

	/*
	 * Stolen objects are always physically contiguous since we just
	 * allocate one big block underneath using the drm_mm range allocator.
	 */
	flags = I915_BO_ALLOC_CONTIGUOUS;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
		return -EBUSY;

	i915_gem_object_init_memory_region(obj, mem);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		i915_gem_object_release_memory_region(obj);
	i915_gem_object_unlock(obj);

	return err;
}

static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
					struct drm_i915_gem_object *obj,
					resource_size_t offset,
					resource_size_t size,
					resource_size_t page_size,
					unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	if (size == 0)
		return -EINVAL;

	/*
	 * With discrete devices, where we lack a mappable aperture, there is
	 * no possible way to ever access this memory on the CPU side.
	 */
	if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size &&
	    !(flags & I915_BO_ALLOC_GPU_ONLY))
		return -ENOSPC;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return -ENOMEM;

	if (offset != I915_BO_INVALID_OFFSET) {
		drm_dbg(&i915->drm,
			"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
			&offset, &size);

		stolen->start = offset;
		stolen->size = size;
		mutex_lock(&i915->mm.stolen_lock);
		ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
		mutex_unlock(&i915->mm.stolen_lock);
	} else {
		ret = i915_gem_stolen_insert_node(i915, stolen, size,
						  mem->min_page_size);
	}
	if (ret)
		goto err_free;

	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
	if (ret)
		goto err_remove;

	return 0;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return ret;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
}
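
/*
 * Typical use, as a sketch (the shmem fallback is illustrative, not a rule
 * from this file): consumers such as the initial framebuffer ask for a
 * stolen-backed buffer and fall back to a regular allocation when stolen
 * is unavailable.
 *
 *	obj = i915_gem_object_create_stolen(i915, size);
 *	if (IS_ERR(obj))
 *		obj = i915_gem_object_create_shmem(i915, size);
 */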

static int init_stolen_smem(struct intel_memory_region *mem)
{
	int err;

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	err = i915_gem_init_stolen(mem);
	if (err)
		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");

	return 0;
}

static int release_stolen_smem(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
	.init = init_stolen_smem,
	.release = release_stolen_smem,
	.init_object = _i915_gem_object_stolen_init,
};

static int init_stolen_lmem(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;
	int err;

	if (GEM_WARN_ON(resource_size(&mem->region) == 0))
		return 0;

	err = i915_gem_init_stolen(mem);
	if (err) {
		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");
		return 0;
	}

	if (mem->io_size &&
	    !io_mapping_init_wc(&mem->iomap, mem->io_start, mem->io_size)) {
		/* err is still 0 here; don't report a dead mapping as success */
		err = -EIO;
		goto err_cleanup;
	}

	drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
		&mem->io_start);
	drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &mem->region.start);

	return 0;

err_cleanup:
	i915_gem_cleanup_stolen(mem->i915);
	return err;
}

static int release_stolen_lmem(struct intel_memory_region *mem)
{
	if (mem->io_size)
		io_mapping_fini(&mem->iomap);
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
	.init = init_stolen_lmem,
	.release = release_stolen_lmem,
	.init_object = _i915_gem_object_stolen_init,
};

static int mtl_get_gms_size(struct intel_uncore *uncore)
{
	u16 ggc, gms;

	ggc = intel_uncore_read16(uncore, GGC);

	/* check GGMS, should be fixed 0x3 (8MB) */
	if ((ggc & GGMS_MASK) != GGMS_MASK)
		return -EIO;

	/* return valid GMS value, -EIO if invalid */
	gms = REG_FIELD_GET(GMS_MASK, ggc);
	switch (gms) {
	case 0x0 ... 0x04:
		return gms * 32;
	case 0xf0 ... 0xfe:
		return (gms - 0xf0 + 1) * 4;
	default:
		MISSING_CASE(gms);
		return -EIO;
	}
}
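
/*
 * Decoding examples for mtl_get_gms_size() above: a GMS field of 0x02
 * yields 64MB (2 * 32MB) and 0xf4 yields 20MB ((0xf4 - 0xf0 + 1) * 4MB);
 * any other encoding is rejected with -EIO.
 */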

struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_uncore *uncore = &i915->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	resource_size_t dsm_size, dsm_base, lmem_size;
	struct intel_memory_region *mem;
	resource_size_t io_start, io_size;
	resource_size_t min_page_size;
	int ret;

	if (WARN_ON_ONCE(instance))
		return ERR_PTR(-ENODEV);

	if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
		return ERR_PTR(-ENXIO);

	if (HAS_LMEMBAR_SMEM_STOLEN(i915) || IS_DG1(i915)) {
		lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
	} else {
		resource_size_t lmem_range;

		lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
		lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
		lmem_size *= SZ_1G;
	}

	if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
		/*
		 * The MTL DSM size lives in the GGC register. MTL also uses
		 * offsets from GSMBASE in its PTEs, so i915 uses
		 * dsm_base = 8MB to set up the stolen region, since
		 * DSMBASE = GSMBASE + 8MB.
		 */
		ret = mtl_get_gms_size(uncore);
		if (ret < 0) {
			drm_err(&i915->drm, "invalid MTL GGC register setting\n");
			return ERR_PTR(ret);
		}

		dsm_base = SZ_8M;
		dsm_size = (resource_size_t)(ret * SZ_1M);

		GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M);
		GEM_BUG_ON((dsm_base + dsm_size) > lmem_size);
	} else {
		/* Use the DSM base address instead for stolen memory */
		dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK;
		if (WARN_ON(lmem_size < dsm_base))
			return ERR_PTR(-ENODEV);
		dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
	}
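	/*
	 * Illustrative numbers for the else-branch above: on a part with
	 * 8GiB of lmem and DSMBASE reporting 0x1f8000000 (7.875GiB), the
	 * stolen region is the top 128MiB of lmem.
	 */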

	if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
		io_start = 0;
		io_size = 0;
	} else {
		io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
		io_size = dsm_size;
	}

	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
					      I915_GTT_PAGE_SIZE_4K;

	mem = intel_memory_region_create(i915, dsm_base, dsm_size,
					 min_page_size,
					 io_start, io_size,
					 type, instance,
					 &i915_region_stolen_lmem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-local");

	mem->private = true;

	return mem;
}

struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_memory_region *mem;

	mem = intel_memory_region_create(i915,
					 intel_graphics_stolen_res.start,
					 resource_size(&intel_graphics_stolen_res),
					 PAGE_SIZE, 0, 0, type, instance,
					 &i915_region_stolen_smem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-system");

	mem->private = true;

	return mem;
}

bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_object_stolen_ops;
}

bool i915_gem_stolen_initialized(const struct drm_i915_private *i915)
{
	return drm_mm_initialized(&i915->mm.stolen);
}

u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915)
{
	return i915->dsm.stolen.start;
}

u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915)
{
	return resource_size(&i915->dsm.stolen);
}

u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
				 const struct drm_mm_node *node)
{
	return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node);
}

bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node)
{
	return drm_mm_node_allocated(node);
}

u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node)
{
	return node->start;
}

u64 i915_gem_stolen_node_size(const struct drm_mm_node *node)
{
	return node->size;
}