/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_pci.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than they installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must
 * always be available for panics. Anything else we can reuse the stolen
 * memory for is a boon.
 */

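/*
 * All allocations from stolen go through the drm_mm range allocator
 * below, serialised by mm.stolen_lock. Callers get back a drm_mm_node
 * describing a physical range within DSM; there is no struct page
 * backing, so ranges are later wrapped in a fake scatterlist (see
 * i915_pages_create_for_stolen()).
 */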
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (GRAPHICS_VER(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node,
						    size, alignment,
						    I915_GEM_STOLEN_BIAS,
						    U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}

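/*
 * A DSM base of 0 normally means the BIOS never set up stolen memory;
 * the exception is LMEMBAR-based stolen (MTL and later integrated
 * parts), where stolen is addressed as an offset from DSMBASE and a
 * base of 0 is expected.
 */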
static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm)
{
	return (dsm->start != 0 || HAS_LMEMBAR_SMEM_STOLEN(i915)) && dsm->end > dsm->start;
}

static int adjust_stolen(struct drm_i915_private *i915,
			 struct resource *dsm)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	/*
	 * Make sure we don't clobber the GTT if it's within stolen memory
	 *
	 * TODO: We have yet to encounter the case where the GTT wasn't at
	 * the end of stolen. With that assumption we could simplify this.
	 */
	if (GRAPHICS_VER(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
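		/*
		 * On gen4 the GGTT base is split across two fields of
		 * PGTBL_CTL; fold the high field back in above the low
		 * bits to recover the full physical address.
		 */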
		if (GRAPHICS_VER(i915) == 4)
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	return 0;
}

static int request_smem_stolen(struct drm_i915_private *i915,
			       struct resource *dsm)
{
	struct resource *r;

	/*
	 * With stolen lmem, we don't need to request system memory for the
	 * address range since it's local to the gpu.
	 *
	 * Starting with MTL, integrated (IGFX) devices expose stolen memory
	 * via LMEMBAR, so it is treated like stolen lmem.
	 */
	if (HAS_LMEM(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
		return 0;

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There are also BIOSes with an off-by-one at the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && GRAPHICS_VER(i915) != 3) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}

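/*
 * The *_get_stolen_reserved() helpers below decode the per-platform
 * STOLEN_RESERVED register into a (base, size) pair describing the
 * reserved range at the top of stolen (the WOPCM). Most check an
 * enable bit and leave base/size untouched when no reservation is
 * active, which the caller in init_reserved_stolen() interprets as
 * "no reserved stolen".
 */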
static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}

	if (HAS_LMEMBAR_SMEM_STOLEN(i915))
		/*
		 * The caller initialised *base to stolen top, so
		 * subtract the size to get the reserved base.
		 */
		*base -= *size;
	else
		*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
}

/*
 * Initialize i915->dsm_reserved to contain the reserved space within the Data
 * Stolen Memory. This is a range at the top of DSM that is reserved, not to
 * be used by the driver, so it must be excluded from the region passed to the
 * allocator later. In the spec this is also called WOPCM.
 *
 * Our expectation is that the reserved space is at the top of the stolen
 * region, as has been the case for every platform, and *never* at the
 * bottom, so the calculation here can be simplified.
 */
static int init_reserved_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_size;
	int ret = 0;

	stolen_top = i915->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	if (GRAPHICS_VER(i915) >= 11) {
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 8) {
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 7) {
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 6) {
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) {
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	}

	/* No reserved stolen */
	if (reserved_base == stolen_top)
		goto bail_out;

	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		ret = -EINVAL;
		goto bail_out;
	}

	i915->dsm_reserved =
		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm_reserved, &i915->dsm);
		ret = -EINVAL;
		goto bail_out;
	}

	return 0;

bail_out:
	i915->dsm_reserved =
		(struct resource)DEFINE_RES_MEM(reserved_base, 0);

	return ret;
}

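/*
 * Common setup shared by the system-memory and local-memory stolen
 * regions: trim the region around the GTT and the reserved
 * top-of-stolen range, claim the physical range from the resource tree
 * where applicable, and seed the drm_mm allocator with what remains.
 */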
static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return -ENOSPC;
	}

	if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return -ENOSPC;
	}

	if (adjust_stolen(i915, &mem->region))
		return -ENOSPC;

	if (request_smem_stolen(i915, &mem->region))
		return -ENOSPC;

	i915->dsm = mem->region;

	if (init_reserved_stolen(i915))
		return -ENOSPC;

	/* Exclude the reserved region from driver use */
	mem->region.end = i915->dsm_reserved.start - 1;
	mem->io_size = min(mem->io_size, resource_size(&mem->region));

	i915->stolen_usable_size = resource_size(&mem->region);

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm) >> 10,
		(u64)i915->stolen_usable_size >> 10);

	if (i915->stolen_usable_size == 0)
		return -ENOSPC;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);

	return 0;
}

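/*
 * Scribble a poison pattern over a stolen range by mapping it one page
 * at a time through the GGTT error-capture scratch PTE. Only built for
 * CONFIG_DRM_I915_DEBUG_GEM; used below to poison ranges as they are
 * acquired (POISON_INUSE) and released (POISON_FREE).
 */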
static void dbg_poison(struct i915_ggtt *ggtt,
		       dma_addr_t addr, resource_size_t size,
		       u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return;

	if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
		return; /* beware stop_machine() inversion */

	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	mutex_lock(&ggtt->error_mutex);
	while (size) {
		void __iomem *s;

		ggtt->vm.insert_page(&ggtt->vm, addr,
				     ggtt->error_capture.start,
				     I915_CACHE_NONE, 0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);
		memset_io(s, x, PAGE_SIZE);
		io_mapping_unmap(s);

		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
	mutex_unlock(&ggtt->error_mutex);
#endif
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_INUSE);

	__i915_gem_object_set_pages(obj, pages);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	/* Should only be called from i915_gem_object_release_stolen() */

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_FREE);

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);
	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);

	i915_gem_object_release_memory_region(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
					   struct drm_i915_gem_object *obj,
					   struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	unsigned int cache_level;
	unsigned int flags;
	int err;

	/*
	 * Stolen objects are always physically contiguous since we just
	 * allocate one big block underneath using the drm_mm range allocator.
	 */
	flags = I915_BO_ALLOC_CONTIGUOUS;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
		return -EBUSY;

	i915_gem_object_init_memory_region(obj, mem);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		i915_gem_object_release_memory_region(obj);
	i915_gem_object_unlock(obj);

	return err;
}

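/*
 * Back a GEM object with stolen memory. A caller-supplied offset
 * (anything other than I915_BO_INVALID_OFFSET) pins the object to a
 * preallocated range, e.g. the BIOS framebuffer; otherwise a fresh
 * range is carved out of the stolen allocator.
 */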
static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
					struct drm_i915_gem_object *obj,
					resource_size_t offset,
					resource_size_t size,
					resource_size_t page_size,
					unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	if (size == 0)
		return -EINVAL;

	/*
	 * With discrete devices, where we lack a mappable aperture, there
	 * is no way to ever access this memory from the CPU side.
	 */
	if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size &&
	    !(flags & I915_BO_ALLOC_GPU_ONLY))
		return -ENOSPC;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return -ENOMEM;

	if (offset != I915_BO_INVALID_OFFSET) {
		drm_dbg(&i915->drm,
			"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
			&offset, &size);

		stolen->start = offset;
		stolen->size = size;
		mutex_lock(&i915->mm.stolen_lock);
		ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
		mutex_unlock(&i915->mm.stolen_lock);
	} else {
		ret = i915_gem_stolen_insert_node(i915, stolen, size,
						  mem->min_page_size);
	}
	if (ret)
		goto err_free;

	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
	if (ret)
		goto err_remove;

	return 0;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return ret;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
}

static int init_stolen_smem(struct intel_memory_region *mem)
{
	int err;

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	err = i915_gem_init_stolen(mem);
	if (err)
		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");

	return 0;
}

static int release_stolen_smem(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
	.init = init_stolen_smem,
	.release = release_stolen_smem,
	.init_object = _i915_gem_object_stolen_init,
};

static int init_stolen_lmem(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;
	int err;

	if (GEM_WARN_ON(resource_size(&mem->region) == 0))
		return 0;

	err = i915_gem_init_stolen(mem);
	if (err) {
		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");
		return 0;
	}

	if (mem->io_size &&
	    !io_mapping_init_wc(&mem->iomap, mem->io_start, mem->io_size))
		goto err_cleanup;

	drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
		&mem->io_start);
	drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &mem->region.start);

	return 0;

err_cleanup:
	i915_gem_cleanup_stolen(mem->i915);
	return err;
}

static int release_stolen_lmem(struct intel_memory_region *mem)
{
	if (mem->io_size)
		io_mapping_fini(&mem->iomap);
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
	.init = init_stolen_lmem,
	.release = release_stolen_lmem,
	.init_object = _i915_gem_object_stolen_init,
};

static int mtl_get_gms_size(struct intel_uncore *uncore)
{
	u16 ggc, gms;

	ggc = intel_uncore_read16(uncore, GGC);

	/* Check GGMS; it should be fixed at 0x3 (8MB) */
	if ((ggc & GGMS_MASK) != GGMS_MASK)
		return -EIO;

	/* Return the stolen size in MB, or -EIO if GMS is invalid */
	gms = REG_FIELD_GET(GMS_MASK, ggc);
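	/*
	 * GMS encodes the stolen size in two ranges: 0x0-0x4 select
	 * 32MB granularity (0MB-128MB), while 0xf0-0xfe select 4MB
	 * granularity (4MB-60MB). Everything else is reserved.
	 */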
	switch (gms) {
	case 0x0 ... 0x04:
		return gms * 32;
	case 0xf0 ... 0xfe:
		return (gms - 0xf0 + 1) * 4;
	default:
		MISSING_CASE(gms);
		return -EIO;
	}
}

struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_uncore *uncore = &i915->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	resource_size_t dsm_size, dsm_base, lmem_size;
	struct intel_memory_region *mem;
	resource_size_t io_start, io_size;
	resource_size_t min_page_size;
	int ret;

	if (WARN_ON_ONCE(instance))
		return ERR_PTR(-ENODEV);

	if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
		return ERR_PTR(-ENXIO);

	if (HAS_LMEMBAR_SMEM_STOLEN(i915) || IS_DG1(i915)) {
		lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
	} else {
		resource_size_t lmem_range;

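		/*
		 * The tile address-range register reports the LMEM size
		 * in GB in its low bits; scale it up to bytes.
		 */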
		lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
		lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
		lmem_size *= SZ_1G;
	}

	if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
		/*
		 * The MTL DSM size lives in the GGC register. MTL also
		 * uses offsets from DSMBASE in its PTEs, so i915 uses
		 * dsm_base = 0 to set up the stolen region.
		 */
		ret = mtl_get_gms_size(uncore);
		if (ret < 0) {
			drm_err(&i915->drm, "invalid MTL GGC register setting\n");
			return ERR_PTR(ret);
		}

		dsm_base = 0;
		dsm_size = (resource_size_t)(ret * SZ_1M);

		GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M);
		GEM_BUG_ON((dsm_size + SZ_8M) > lmem_size);
	} else {
		/* Otherwise the DSM base address marks the start of stolen */
		dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK;
		if (WARN_ON(lmem_size < dsm_base))
			return ERR_PTR(-ENODEV);
		dsm_size = lmem_size - dsm_base;
	}

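	/*
	 * With LMEMBAR stolen the first 8MB of the BAR is reserved for
	 * the GTT (GGMS, fixed at 8MB above), so the CPU window onto
	 * stolen starts at BAR + 8M. If instead the BAR is smaller than
	 * lmem, there is no usable CPU window at all.
	 */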
	io_size = dsm_size;
	if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
		io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + SZ_8M;
	} else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
		io_start = 0;
		io_size = 0;
	} else {
		io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
	}

	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
					      I915_GTT_PAGE_SIZE_4K;

	mem = intel_memory_region_create(i915, dsm_base, dsm_size,
					 min_page_size,
					 io_start, io_size,
					 type, instance,
					 &i915_region_stolen_lmem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-local");

	mem->private = true;

	return mem;
}

struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_memory_region *mem;

	mem = intel_memory_region_create(i915,
					 intel_graphics_stolen_res.start,
					 resource_size(&intel_graphics_stolen_res),
					 PAGE_SIZE, 0, 0, type, instance,
					 &i915_region_stolen_smem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-system");

	mem->private = true;

	return mem;
}

bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_object_stolen_ops;
}