/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_pci.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than they installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (GRAPHICS_VER(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node,
						    size, alignment,
						    I915_GEM_STOLEN_BIAS,
						    U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}
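
/*
 * Usage sketch (illustrative only, not a caller that exists in this file):
 * carve a range out of stolen and release it again. The caller owns the
 * node and must keep it alive until i915_gem_stolen_remove_node().
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_stolen_insert_node(i915, &node, SZ_64K, 4096);
 *	if (err)
 *		return err;
 *	...
 *	i915_gem_stolen_remove_node(i915, &node);
 */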

static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm)
{
	return (dsm->start != 0 || HAS_LMEMBAR_SMEM_STOLEN(i915)) && dsm->end > dsm->start;
}

static int adjust_stolen(struct drm_i915_private *i915,
			 struct resource *dsm)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	/*
	 * Make sure we don't clobber the GTT if it's within stolen memory
	 *
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */
	if (GRAPHICS_VER(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (GRAPHICS_VER(i915) == 4)
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res = DEFINE_RES_MEM(ggtt_start, ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	return 0;
}
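
/*
 * Worked example for the GTT-in-stolen carve-out above (illustrative
 * numbers, not taken from real hardware): with stolen spanning
 * [0x20000000, 0x3fffffff] and a 512KiB GGTT sitting at its very top,
 * chunk 0 becomes everything below the GGTT start while chunk 1 collapses
 * to nothing, so the larger chunk 0 is kept and only the tail holding the
 * GGTT is given up.
 */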

static int request_smem_stolen(struct drm_i915_private *i915,
			       struct resource *dsm)
{
	struct resource *r;

	/*
	 * With stolen lmem, we don't need to request system memory for the
	 * address range since it's local to the gpu.
	 *
	 * Starting with MTL, on IGFX devices the stolen memory is exposed via
	 * LMEMBAR and shall be considered similar to stolen lmem.
	 */
	if (HAS_LMEM(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
		return 0;

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There's also BIOS with off-by-one on the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && GRAPHICS_VER(i915) != 3) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	/* Wa_14019821291 */
	if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
		/*
		 * This workaround is primarily implemented by the BIOS. We
		 * just need to figure out whether the BIOS has applied the
		 * workaround (meaning the programmed address falls within
		 * the DSM) and, if so, reserve that part of the DSM to
		 * prevent accidental reuse. The DSM location should be just
		 * below the WOPCM.
		 */
		u64 gscpsmi_base = intel_uncore_read64_2x32(uncore,
							    MTL_GSCPSMI_BASEADDR_LSB,
							    MTL_GSCPSMI_BASEADDR_MSB);
		if (gscpsmi_base >= i915->dsm.stolen.start &&
		    gscpsmi_base < i915->dsm.stolen.end) {
			*base = gscpsmi_base;
			*size = i915->dsm.stolen.end - gscpsmi_base;
			return;
		}
	}

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}

	if (HAS_LMEMBAR_SMEM_STOLEN(i915))
		/* the base is initialized to stolen top so subtract size to get base */
		*base -= *size;
	else
		*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
}

/*
 * Initialize i915->dsm.reserved to contain the reserved space within the
 * Data Stolen Memory. This is a range at the top of DSM that is reserved,
 * not to be used by the driver, so it must be excluded from the region
 * passed to the allocator later. In the spec this is also called WOPCM.
 *
 * Our expectation is that the reserved space is at the top of the stolen
 * region, as it has been the case for every platform, and *never* at the
 * bottom, so the calculation here can be simplified.
 */
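/*
 * Illustrative layout, assuming the usual topology described above with
 * the reserved range at the very top of stolen:
 *
 *	dsm.stolen.start       dsm.reserved.start         stolen_top
 *	|<---- usable by the allocator ---->|<-- reserved/WOPCM -->|
 */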
static int init_reserved_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_size;
	int ret = 0;

	stolen_top = i915->dsm.stolen.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	if (GRAPHICS_VER(i915) >= 11) {
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 8) {
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 7) {
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 6) {
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) {
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	}

	/* No reserved stolen */
	if (reserved_base == stolen_top)
		goto bail_out;

	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		ret = -EINVAL;
		goto bail_out;
	}

	i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm.stolen, &i915->dsm.reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm.reserved, &i915->dsm.stolen);
		ret = -EINVAL;
		goto bail_out;
	}

	return 0;

bail_out:
	i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, 0);

	return ret;
}

static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return -ENOSPC;
	}

	if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return -ENOSPC;
	}

	if (adjust_stolen(i915, &mem->region))
		return -ENOSPC;

	if (request_smem_stolen(i915, &mem->region))
		return -ENOSPC;

	i915->dsm.stolen = mem->region;

	if (init_reserved_stolen(i915))
		return -ENOSPC;

	/* Exclude the reserved region from driver use */
	mem->region.end = i915->dsm.reserved.start - 1;
	mem->io = DEFINE_RES_MEM(mem->io.start,
				 min(resource_size(&mem->io),
				     resource_size(&mem->region)));

	i915->dsm.usable_size = resource_size(&mem->region);

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm.stolen) >> 10,
		(u64)i915->dsm.usable_size >> 10);

	if (i915->dsm.usable_size == 0)
		return -ENOSPC;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->dsm.usable_size);

	/*
	 * Access to stolen lmem beyond a certain size for MTL A0 stepping
	 * would crash the machine. Disable stolen lmem for userspace access
	 * by setting usable_size to zero.
	 */
	if (IS_METEORLAKE(i915) && INTEL_REVID(i915) == 0x0)
		i915->dsm.usable_size = 0;

	return 0;
}

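/*
 * Stolen memory has no struct pages and may have no direct CPU mapping,
 * so the debug poison below is written through the GGTT: each page is
 * temporarily bound to the error-capture scratch slot and filled via the
 * write-combining aperture before moving on to the next page.
 */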
static void dbg_poison(struct i915_ggtt *ggtt,
		       dma_addr_t addr, resource_size_t size,
		       u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return;

	if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
		return; /* beware stop_machine() inversion */

	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	mutex_lock(&ggtt->error_mutex);
	while (size) {
		void __iomem *s;

		ggtt->vm.insert_page(&ggtt->vm, addr,
				     ggtt->error_capture.start,
				     i915_gem_get_pat_index(ggtt->vm.i915,
							    I915_CACHE_NONE),
				     0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);
		memset_io(s, x, PAGE_SIZE);
		io_mapping_unmap(s);

		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
	mutex_unlock(&ggtt->error_mutex);
#endif
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm.stolen)));

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.stolen.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_INUSE);

	__i915_gem_object_set_pages(obj, pages);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	/* Should only be called from i915_gem_object_release_stolen() */

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_FREE);

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);
	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);

	i915_gem_object_release_memory_region(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
					   struct drm_i915_gem_object *obj,
					   struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	unsigned int cache_level;
	unsigned int flags;
	int err;

	/*
	 * Stolen objects are always physically contiguous since we just
	 * allocate one big block underneath using the drm_mm range allocator.
	 */
	flags = I915_BO_ALLOC_CONTIGUOUS;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
		return -EBUSY;

	i915_gem_object_init_memory_region(obj, mem);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		i915_gem_object_release_memory_region(obj);
	i915_gem_object_unlock(obj);

	return err;
}

static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
					struct drm_i915_gem_object *obj,
					resource_size_t offset,
					resource_size_t size,
					resource_size_t page_size,
					unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	if (size == 0)
		return -EINVAL;

	/*
	 * On discrete devices, where we lack a mappable aperture, there is
	 * no possible way to ever access this memory on the CPU side.
	 */
	if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !resource_size(&mem->io) &&
	    !(flags & I915_BO_ALLOC_GPU_ONLY))
		return -ENOSPC;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return -ENOMEM;

	if (offset != I915_BO_INVALID_OFFSET) {
		drm_dbg(&i915->drm,
			"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
			&offset, &size);

		stolen->start = offset;
		stolen->size = size;
		mutex_lock(&i915->mm.stolen_lock);
		ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
		mutex_unlock(&i915->mm.stolen_lock);
	} else {
		ret = i915_gem_stolen_insert_node(i915, stolen, size,
						  mem->min_page_size);
	}
	if (ret)
		goto err_free;

	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
	if (ret)
		goto err_remove;

	return 0;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return ret;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
}
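
/*
 * Usage sketch for i915_gem_object_create_stolen() (illustrative caller,
 * not part of this file): allocate a buffer out of stolen and drop it
 * again via the usual object reference counting.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_stolen(i915, SZ_4M);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	i915_gem_object_put(obj);
 */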

static int init_stolen_smem(struct intel_memory_region *mem)
{
	int err;

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	err = i915_gem_init_stolen(mem);
	if (err)
		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");

	return 0;
}

static int release_stolen_smem(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
	.init = init_stolen_smem,
	.release = release_stolen_smem,
	.init_object = _i915_gem_object_stolen_init,
};

static int init_stolen_lmem(struct intel_memory_region *mem)
{
	int err;

	if (GEM_WARN_ON(resource_size(&mem->region) == 0))
		return 0;

	err = i915_gem_init_stolen(mem);
	if (err) {
		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");
		return 0;
	}

	if (resource_size(&mem->io) &&
	    !io_mapping_init_wc(&mem->iomap, mem->io.start, resource_size(&mem->io)))
		goto err_cleanup;

	return 0;

err_cleanup:
	i915_gem_cleanup_stolen(mem->i915);
	return err;
}

static int release_stolen_lmem(struct intel_memory_region *mem)
{
	if (resource_size(&mem->io))
		io_mapping_fini(&mem->iomap);
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
	.init = init_stolen_lmem,
	.release = release_stolen_lmem,
	.init_object = _i915_gem_object_stolen_init,
};

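/*
 * Decode the graphics stolen size from the MTL GGC register. GGMS is
 * expected to hold the fixed 8MB encoding; GMS selects the DSM size in two
 * bands, e.g. gms == 0x02 -> 64MB and gms == 0xf0 -> 4MB (examples taken
 * from the decode below).
 */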
static int mtl_get_gms_size(struct intel_uncore *uncore)
{
	u16 ggc, gms;

	ggc = intel_uncore_read16(uncore, GGC);

	/* check GGMS, should be fixed 0x3 (8MB) */
	if ((ggc & GGMS_MASK) != GGMS_MASK)
		return -EIO;

	/* return valid GMS value, -EIO if invalid */
	gms = REG_FIELD_GET(GMS_MASK, ggc);
	switch (gms) {
	case 0x0 ... 0x04:
		return gms * 32;
	case 0xf0 ... 0xfe:
		return (gms - 0xf0 + 1) * 4;
	default:
		MISSING_CASE(gms);
		return -EIO;
	}
}

struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_uncore *uncore = &i915->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	resource_size_t dsm_size, dsm_base, lmem_size;
	struct intel_memory_region *mem;
	resource_size_t io_start, io_size;
	resource_size_t min_page_size;
	int ret;

	if (WARN_ON_ONCE(instance))
		return ERR_PTR(-ENODEV);

	if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
		return ERR_PTR(-ENXIO);

	if (HAS_LMEMBAR_SMEM_STOLEN(i915) || IS_DG1(i915)) {
		lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
	} else {
		resource_size_t lmem_range;

		lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
		lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
		lmem_size *= SZ_1G;
	}

	if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
		/*
		 * The MTL DSM size lives in the GGC register. MTL also uses
		 * an offset from GSMBASE in its PTEs, so i915 uses
		 * dsm_base = 8MB to set up the stolen region, since
		 * DSMBASE = GSMBASE + 8MB.
		 */
		ret = mtl_get_gms_size(uncore);
		if (ret < 0) {
			drm_err(&i915->drm, "invalid MTL GGC register setting\n");
			return ERR_PTR(ret);
		}

		dsm_base = SZ_8M;
		dsm_size = (resource_size_t)(ret * SZ_1M);

		GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M);
		GEM_BUG_ON((dsm_base + dsm_size) > lmem_size);
	} else {
		/* Use DSM base address instead for stolen memory */
		dsm_base = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
		if (WARN_ON(lmem_size < dsm_base))
			return ERR_PTR(-ENODEV);
		dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
	}

	if (i915_direct_stolen_access(i915)) {
		drm_dbg(&i915->drm, "Using direct DSM access\n");
		io_start = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
		io_size = dsm_size;
	} else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
		io_start = 0;
		io_size = 0;
	} else {
		io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
		io_size = dsm_size;
	}

	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
					      I915_GTT_PAGE_SIZE_4K;

	mem = intel_memory_region_create(i915, dsm_base, dsm_size,
					 min_page_size,
					 io_start, io_size,
					 type, instance,
					 &i915_region_stolen_lmem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-local");

	mem->private = true;

	return mem;
}

struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_memory_region *mem;

	mem = intel_memory_region_create(i915,
					 intel_graphics_stolen_res.start,
					 resource_size(&intel_graphics_stolen_res),
					 PAGE_SIZE, 0, 0, type, instance,
					 &i915_region_stolen_smem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-system");

	mem->private = true;

	return mem;
}

bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_object_stolen_ops;
}

bool i915_gem_stolen_initialized(const struct drm_i915_private *i915)
{
	return drm_mm_initialized(&i915->mm.stolen);
}

u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915)
{
	return i915->dsm.stolen.start;
}

u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915)
{
	return resource_size(&i915->dsm.stolen);
}

u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
				 const struct drm_mm_node *node)
{
	return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node);
}

bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node)
{
	return drm_mm_node_allocated(node);
}

u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node)
{
	return node->start;
}

u64 i915_gem_stolen_node_size(const struct drm_mm_node *node)
{
	return node->size;
}