// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_ggtt.h"

#include <linux/fault-inject.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sizes.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/intel/i915_drm.h>
#include <generated/xe_wa_oob.h>

#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"
#include "regs/xe_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

/**
 * DOC: Global Graphics Translation Table (GGTT)
 *
 * Xe GGTT implements the support for a Global Virtual Address space that is used
 * for resources that are accessible to privileged (i.e. kernel-mode) processes,
 * and not tied to a specific user-level process. For example, the Graphics
 * micro-Controller (GuC) and Display Engine (if present) utilize this Global
 * address space.
 *
 * The Global GTT (GGTT) translates from the Global virtual address to a physical
 * address that can be accessed by HW. The GGTT is a flat, single-level table.
 *
 * Xe implements a simplified version of the GGTT specifically managing only a
 * certain range of it that goes from the Write Once Protected Content Memory
 * (WOPCM) layout to a predefined GUC_GGTT_TOP. This approach avoids
 * complications related to the GuC (Graphics Microcontroller) hardware
 * limitations. The GuC address space is limited on both ends of the GGTT,
 * because the GuC shim HW redirects accesses to those addresses to other HW
 * areas instead of going through the GGTT. On the bottom end, the GuC can't
 * access offsets below the WOPCM size, while on the top side the limit is fixed
 * at GUC_GGTT_TOP. To keep things simple, instead of checking each object to
 * see if it is accessed by the GuC or not, we just exclude those areas from the
 * allocator. Additionally, to simplify the driver load, we use the maximum
 * WOPCM size in this logic instead of the programmed one, so we don't need to
 * wait until the actual size to be programmed is determined (which requires FW
 * fetch) before initializing the GGTT. These simplifications might waste space
 * in the GGTT (about 20-25 MBs depending on the platform) but we can live with
 * this. Another benefit is that the GuC bootrom can't access anything below the
 * WOPCM max size, so anything the bootrom needs to access (e.g. a RSA key)
 * needs to be placed in the GGTT above the WOPCM max size. Starting the GGTT
 * allocations above the WOPCM max size gives us the correct placement for free.
 */

static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
				   u16 pat_index)
{
	u64 pte;

	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
	pte |= XE_PAGE_PRESENT;

	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
		pte |= XE_GGTT_PTE_DM;

	return pte;
}

static u64 xelpg_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
				    u16 pat_index)
{
	struct xe_device *xe = xe_bo_device(bo);
	u64 pte;

	pte = xelp_ggtt_pte_encode_bo(bo, bo_offset, pat_index);

	xe_assert(xe, pat_index <= 3);

	if (pat_index & BIT(0))
		pte |= XELPG_GGTT_PTE_PAT0;

	if (pat_index & BIT(1))
		pte |= XELPG_GGTT_PTE_PAT1;

	return pte;
}
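
/*
 * Illustrative PAT encoding (a sketch; XELPG_GGTT_PTE_PAT0/PAT1 are the two
 * PAT bits of the PTE, bits 52 and 53 per the XELPG PTE definitions):
 * pat_index 0 sets neither PAT bit, 1 sets only XELPG_GGTT_PTE_PAT0, 2 sets
 * only XELPG_GGTT_PTE_PAT1, and 3 sets both. Indexes above 3 are not
 * representable in a GGTT PTE and trip the assert above.
 */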

static unsigned int probe_gsm_size(struct pci_dev *pdev)
{
	u16 gmch_ctl, ggms;

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &gmch_ctl);
	ggms = (gmch_ctl >> BDW_GMCH_GGMS_SHIFT) & BDW_GMCH_GGMS_MASK;
	return ggms ? SZ_1M << ggms : 0;
}
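
/*
 * Worked example (illustrative): a GGMS field value of 3 gives
 * SZ_1M << 3 = 8 MiB of GSM. At 8 bytes per PTE that is 1,048,576 entries,
 * each mapping a 4 KiB page, i.e. a 4 GiB GGTT address space. This matches
 * the ggtt->size computation in xe_ggtt_init_early() below.
 */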

static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
{
	struct xe_tile *tile = ggtt->tile;
	struct xe_gt *affected_gt = XE_WA(tile->primary_gt, 22019338487) ?
		tile->primary_gt : tile->media_gt;
	struct xe_mmio *mmio = &affected_gt->mmio;
	u32 max_gtt_writes = XE_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63;
	/*
	 * Wa_22019338487: GMD_ID is a RO register, a dummy write forces gunit
	 * to wait for completion of prior GTT writes before letting this through.
	 * This needs to be done for all GGTT writes originating from the CPU.
	 */
	lockdep_assert_held(&ggtt->lock);

	if ((++ggtt->access_count % max_gtt_writes) == 0) {
		xe_mmio_write32(mmio, GMD_ID, 0x0);
		ggtt->access_count = 0;
	}
}

static void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte)
{
	xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK));
	xe_tile_assert(ggtt->tile, addr < ggtt->size);

	writeq(pte, &ggtt->gsm[addr >> XE_PTE_SHIFT]);
}

static void xe_ggtt_set_pte_and_flush(struct xe_ggtt *ggtt, u64 addr, u64 pte)
{
	xe_ggtt_set_pte(ggtt, addr, pte);
	ggtt_update_access_counter(ggtt);
}

static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
{
	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
	u64 end = start + size - 1;
	u64 scratch_pte;

	xe_tile_assert(ggtt->tile, start < end);

	if (ggtt->scratch)
		scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0,
							  pat_index);
	else
		scratch_pte = 0;

	while (start < end) {
		ggtt->pt_ops->ggtt_set_pte(ggtt, start, scratch_pte);
		start += XE_PAGE_SIZE;
	}
}

static void ggtt_fini_early(struct drm_device *drm, void *arg)
{
	struct xe_ggtt *ggtt = arg;

	destroy_workqueue(ggtt->wq);
	mutex_destroy(&ggtt->lock);
	drm_mm_takedown(&ggtt->mm);
}

static void ggtt_fini(void *arg)
{
	struct xe_ggtt *ggtt = arg;

	ggtt->scratch = NULL;
}

static void primelockdep(struct xe_ggtt *ggtt)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&ggtt->lock);
	fs_reclaim_release(GFP_KERNEL);
}

static const struct xe_ggtt_pt_ops xelp_pt_ops = {
	.pte_encode_bo = xelp_ggtt_pte_encode_bo,
	.ggtt_set_pte = xe_ggtt_set_pte,
};

static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
	.pte_encode_bo = xelpg_ggtt_pte_encode_bo,
	.ggtt_set_pte = xe_ggtt_set_pte,
};

static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
	.pte_encode_bo = xelpg_ggtt_pte_encode_bo,
	.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
};

/**
 * xe_ggtt_init_early - Early GGTT initialization
 * @ggtt: the &xe_ggtt to be initialized
 *
 * It allows the creation of new mappings usable by the GuC.
 * Mappings are not usable by the HW engines, as the GGTT doesn't have its
 * scratch page set nor the initial clear done yet. That will happen in the
 * regular, non-early GGTT initialization.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_init_early(struct xe_ggtt *ggtt)
{
	struct xe_device *xe = tile_to_xe(ggtt->tile);
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned int gsm_size;
	int err;

	if (IS_SRIOV_VF(xe))
		gsm_size = SZ_8M; /* GGTT is expected to be 4GiB */
	else
		gsm_size = probe_gsm_size(pdev);

	if (gsm_size == 0) {
		drm_err(&xe->drm, "Hardware reported no preallocated GSM\n");
		return -ENOMEM;
	}

	ggtt->gsm = ggtt->tile->mmio.regs + SZ_8M;
	ggtt->size = (gsm_size / 8) * (u64) XE_PAGE_SIZE;

	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		ggtt->flags |= XE_GGTT_FLAGS_64K;

	if (ggtt->size > GUC_GGTT_TOP)
		ggtt->size = GUC_GGTT_TOP;

	if (GRAPHICS_VERx100(xe) >= 1270)
		ggtt->pt_ops = (ggtt->tile->media_gt &&
				XE_WA(ggtt->tile->media_gt, 22019338487)) ||
				XE_WA(ggtt->tile->primary_gt, 22019338487) ?
				&xelpg_pt_wa_ops : &xelpg_pt_ops;
	else
		ggtt->pt_ops = &xelp_pt_ops;

	ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);

	drm_mm_init(&ggtt->mm, xe_wopcm_size(xe),
		    ggtt->size - xe_wopcm_size(xe));
	mutex_init(&ggtt->lock);
	primelockdep(ggtt);

	err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
	if (err)
		return err;

	if (IS_SRIOV_VF(xe)) {
		err = xe_gt_sriov_vf_prepare_ggtt(xe_tile_get_gt(ggtt->tile, 0));
		if (err)
			return err;
	}

	return 0;
}
ALLOW_ERROR_INJECTION(xe_ggtt_init_early, ERRNO); /* See xe_pci_probe() */
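
/*
 * Illustrative init ordering (a sketch based on the kernel-doc above, not a
 * normative probe sequence): xe_ggtt_init_early() runs first so GuC-usable
 * mappings can be created early in driver load; xe_ggtt_init() follows once
 * BO management is available, to allocate the scratch page and clear any
 * range not already claimed (e.g. by display).
 */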

static void xe_ggtt_invalidate(struct xe_ggtt *ggtt);

static void xe_ggtt_initial_clear(struct xe_ggtt *ggtt)
{
	struct drm_mm_node *hole;
	u64 start, end;

	/* Display may have allocated inside ggtt, so be careful with clearing here */
	mutex_lock(&ggtt->lock);
	drm_mm_for_each_hole(hole, &ggtt->mm, start, end)
		xe_ggtt_clear(ggtt, start, end - start);

	xe_ggtt_invalidate(ggtt);
	mutex_unlock(&ggtt->lock);
}

static void ggtt_node_remove(struct xe_ggtt_node *node)
{
	struct xe_ggtt *ggtt = node->ggtt;
	struct xe_device *xe = tile_to_xe(ggtt->tile);
	bool bound;
	int idx;

	bound = drm_dev_enter(&xe->drm, &idx);

	mutex_lock(&ggtt->lock);
	if (bound)
		xe_ggtt_clear(ggtt, node->base.start, node->base.size);
	drm_mm_remove_node(&node->base);
	node->base.size = 0;
	mutex_unlock(&ggtt->lock);

	if (!bound)
		goto free_node;

	if (node->invalidate_on_remove)
		xe_ggtt_invalidate(ggtt);

	drm_dev_exit(idx);

free_node:
	xe_ggtt_node_fini(node);
}

static void ggtt_node_remove_work_func(struct work_struct *work)
{
	struct xe_ggtt_node *node = container_of(work, typeof(*node),
						 delayed_removal_work);
	struct xe_device *xe = tile_to_xe(node->ggtt->tile);

	xe_pm_runtime_get(xe);
	ggtt_node_remove(node);
	xe_pm_runtime_put(xe);
}

/**
 * xe_ggtt_node_remove - Remove a &xe_ggtt_node from the GGTT
 * @node: the &xe_ggtt_node to be removed
 * @invalidate: if node needs invalidation upon removal
 */
void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate)
{
	struct xe_ggtt *ggtt;
	struct xe_device *xe;

	if (!node || !node->ggtt)
		return;

	ggtt = node->ggtt;
	xe = tile_to_xe(ggtt->tile);

	node->invalidate_on_remove = invalidate;

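	/*
	 * Clearing PTEs needs the device awake. A sketch of the design: if a
	 * runtime PM reference can be taken without triggering a resume,
	 * remove the node inline; otherwise defer to the GGTT workqueue,
	 * whose work item resumes the device before doing the removal.
	 */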
	if (xe_pm_runtime_get_if_active(xe)) {
		ggtt_node_remove(node);
		xe_pm_runtime_put(xe);
	} else {
		queue_work(ggtt->wq, &node->delayed_removal_work);
	}
}

/**
 * xe_ggtt_init - Regular non-early GGTT initialization
 * @ggtt: the &xe_ggtt to be initialized
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_init(struct xe_ggtt *ggtt)
{
	struct xe_device *xe = tile_to_xe(ggtt->tile);
	unsigned int flags;
	int err;

	/*
	 * So we don't need to worry about 64K GGTT layout when dealing with
	 * scratch entries, we instead keep the scratch page in system memory
	 * on platforms where 64K pages are needed for VRAM.
	 */
	flags = XE_BO_FLAG_PINNED;
	if (ggtt->flags & XE_GGTT_FLAGS_64K)
		flags |= XE_BO_FLAG_SYSTEM;
	else
		flags |= XE_BO_FLAG_VRAM_IF_DGFX(ggtt->tile);

	ggtt->scratch = xe_managed_bo_create_pin_map(xe, ggtt->tile, XE_PAGE_SIZE, flags);
	if (IS_ERR(ggtt->scratch)) {
		err = PTR_ERR(ggtt->scratch);
		goto err;
	}

	xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, ggtt->scratch->size);

	xe_ggtt_initial_clear(ggtt);

	return devm_add_action_or_reset(xe->drm.dev, ggtt_fini, ggtt);
err:
	ggtt->scratch = NULL;
	return err;
}

static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
{
	int err;

	if (!gt)
		return;

	err = xe_gt_tlb_invalidation_ggtt(gt);
	if (err)
		drm_warn(&gt_to_xe(gt)->drm, "xe_gt_tlb_invalidation_ggtt error=%d", err);
}

static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
{
	struct xe_device *xe = tile_to_xe(ggtt->tile);

	/*
	 * XXX: Barrier for GGTT pages. Unsure exactly why this is required but
	 * without it LNL is having issues with the GuC reading the scratch page
	 * instead of the correct GGTT page. Not particularly a hot code path,
	 * so blindly do a mmio read here, which results in the GuC reading the
	 * correct GGTT page.
	 */
	xe_mmio_read32(xe_root_tile_mmio(xe), VF_CAP_REG);

	/* Each GT in a tile has its own TLB to cache GGTT lookups */
	ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt);
	ggtt_invalidate_gt_tlb(ggtt->tile->media_gt);
}

static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
			      const struct drm_mm_node *node, const char *description)
{
	char buf[10];

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		string_get_size(node->size, 1, STRING_UNITS_2, buf, sizeof(buf));
		xe_gt_dbg(ggtt->tile->primary_gt, "GGTT %#llx-%#llx (%s) %s\n",
			  node->start, node->start + node->size, buf, description);
	}
}

/**
 * xe_ggtt_node_insert_balloon - prevent allocation of specified GGTT addresses
 * @node: the &xe_ggtt_node to hold reserved GGTT node
 * @start: the starting GGTT address of the reserved region
 * @end: the end GGTT address of the reserved region
 *
 * Use xe_ggtt_node_remove_balloon() to release a reserved GGTT node.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end)
{
	struct xe_ggtt *ggtt = node->ggtt;
	int err;

	xe_tile_assert(ggtt->tile, start < end);
	xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE));
	xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE));
	xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(&node->base));

	node->base.color = 0;
	node->base.start = start;
	node->base.size = end - start;

	mutex_lock(&ggtt->lock);
	err = drm_mm_reserve_node(&ggtt->mm, &node->base);
	mutex_unlock(&ggtt->lock);

	if (xe_gt_WARN(ggtt->tile->primary_gt, err,
		       "Failed to balloon GGTT %#llx-%#llx (%pe)\n",
		       node->base.start, node->base.start + node->base.size, ERR_PTR(err)))
		return err;

	xe_ggtt_dump_node(ggtt, &node->base, "balloon");
	return 0;
}

/**
 * xe_ggtt_node_remove_balloon - release a reserved GGTT region
 * @node: the &xe_ggtt_node with reserved GGTT region
 *
 * See xe_ggtt_node_insert_balloon() for details.
 */
void xe_ggtt_node_remove_balloon(struct xe_ggtt_node *node)
{
	if (!node || !node->ggtt)
		return;

	if (!drm_mm_node_allocated(&node->base))
		goto free_node;

	xe_ggtt_dump_node(node->ggtt, &node->base, "remove-balloon");

	mutex_lock(&node->ggtt->lock);
	drm_mm_remove_node(&node->base);
	mutex_unlock(&node->ggtt->lock);

free_node:
	xe_ggtt_node_fini(node);
}

/**
 * xe_ggtt_node_insert_locked - Locked version to insert a &xe_ggtt_node into the GGTT
 * @node: the &xe_ggtt_node to be inserted
 * @size: size of the node
 * @align: alignment constraint of the node
 * @mm_flags: flags to control the node behavior
 *
 * It cannot be called without first having called xe_ggtt_init() once.
 * To be used in cases where ggtt->lock is already taken.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
			       u32 size, u32 align, u32 mm_flags)
{
	return drm_mm_insert_node_generic(&node->ggtt->mm, &node->base, size, align, 0,
					  mm_flags);
}

/**
 * xe_ggtt_node_insert - Insert a &xe_ggtt_node into the GGTT
 * @node: the &xe_ggtt_node to be inserted
 * @size: size of the node
 * @align: alignment constraint of the node
 *
 * It cannot be called without first having called xe_ggtt_init() once.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align)
{
	int ret;

	if (!node || !node->ggtt)
		return -ENOENT;

	mutex_lock(&node->ggtt->lock);
	ret = xe_ggtt_node_insert_locked(node, size, align,
					 DRM_MM_INSERT_HIGH);
	mutex_unlock(&node->ggtt->lock);

	return ret;
}

/**
 * xe_ggtt_node_init - Initialize %xe_ggtt_node struct
 * @ggtt: the &xe_ggtt where the new node will later be inserted/reserved.
 *
 * This function will allocate the struct %xe_ggtt_node and return its pointer.
 * This struct will then be freed after the node removal upon xe_ggtt_node_remove()
 * or xe_ggtt_node_remove_balloon().
 * Having the %xe_ggtt_node struct allocated doesn't mean that the node is already
 * allocated in GGTT. Only xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(), or
 * xe_ggtt_node_insert_balloon() will ensure the node is inserted or reserved in
 * GGTT.
 *
 * Return: A pointer to %xe_ggtt_node struct on success. An ERR_PTR otherwise.
 */
struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt)
{
	struct xe_ggtt_node *node = kzalloc(sizeof(*node), GFP_NOFS);

	if (!node)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&node->delayed_removal_work, ggtt_node_remove_work_func);
	node->ggtt = ggtt;

	return node;
}
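
/*
 * Illustrative node lifecycle (a sketch using the functions above; error
 * handling abbreviated and the 64 KiB size/alignment values arbitrary):
 *
 *	struct xe_ggtt_node *node = xe_ggtt_node_init(ggtt);
 *
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *
 *	err = xe_ggtt_node_insert(node, SZ_64K, SZ_64K);
 *	if (err) {
 *		xe_ggtt_node_fini(node);
 *		return err;
 *	}
 *
 *	... use the GGTT range at node->base.start ...
 *
 *	xe_ggtt_node_remove(node, true);	(also frees @node)
 */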

/**
 * xe_ggtt_node_fini - Forcibly finalize %xe_ggtt_node struct
 * @node: the &xe_ggtt_node to be freed
 *
 * If anything went wrong with either xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
 * or xe_ggtt_node_insert_balloon(), and this @node is not going to be reused, then
 * this function needs to be called to free the %xe_ggtt_node struct.
 */
void xe_ggtt_node_fini(struct xe_ggtt_node *node)
{
	kfree(node);
}

/**
 * xe_ggtt_node_allocated - Check if node is allocated in GGTT
 * @node: the &xe_ggtt_node to be inspected
 *
 * Return: True if allocated, False otherwise.
 */
bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node)
{
	if (!node || !node->ggtt)
		return false;

	return drm_mm_node_allocated(&node->base);
}

/**
 * xe_ggtt_map_bo - Map the BO into GGTT
 * @ggtt: the &xe_ggtt where node will be mapped
 * @bo: the &xe_bo to be mapped
 */
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
	u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
	u64 start;
	u64 offset, pte;

	if (XE_WARN_ON(!bo->ggtt_node))
		return;

	start = bo->ggtt_node->base.start;

	for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
		pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
		ggtt->pt_ops->ggtt_set_pte(ggtt, start + offset, pte);
	}
}

static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
				  u64 start, u64 end)
{
	int err;
	u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE;

	if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
		alignment = SZ_64K;

	if (XE_WARN_ON(bo->ggtt_node)) {
		/* Someone's already inserted this BO in the GGTT */
		xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);
		return 0;
	}

	err = xe_bo_validate(bo, NULL, false);
	if (err)
		return err;

	xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));

	bo->ggtt_node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(bo->ggtt_node)) {
		err = PTR_ERR(bo->ggtt_node);
		bo->ggtt_node = NULL;
		goto out;
	}

	mutex_lock(&ggtt->lock);
	err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node->base, bo->size,
					  alignment, 0, start, end, 0);
	if (err) {
		xe_ggtt_node_fini(bo->ggtt_node);
		bo->ggtt_node = NULL;
	} else {
		xe_ggtt_map_bo(ggtt, bo);
	}
	mutex_unlock(&ggtt->lock);

	if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
		xe_ggtt_invalidate(ggtt);

out:
	xe_pm_runtime_put(tile_to_xe(ggtt->tile));

	return err;
}

/**
 * xe_ggtt_insert_bo_at - Insert BO at a specific GGTT space
 * @ggtt: the &xe_ggtt where bo will be inserted
 * @bo: the &xe_bo to be inserted
 * @start: address where it will be inserted
 * @end: end of the range where it will be inserted
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
			 u64 start, u64 end)
{
	return __xe_ggtt_insert_bo_at(ggtt, bo, start, end);
}

/**
 * xe_ggtt_insert_bo - Insert BO into GGTT
 * @ggtt: the &xe_ggtt where bo will be inserted
 * @bo: the &xe_bo to be inserted
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
	return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX);
}
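
/*
 * Illustrative BO mapping flow (a sketch using the helpers above; error
 * handling abbreviated):
 *
 *	err = xe_ggtt_insert_bo(ggtt, bo);
 *	if (err)
 *		return err;
 *
 *	... the BO is now reachable at bo->ggtt_node->base.start ...
 *
 *	xe_ggtt_remove_bo(ggtt, bo);
 */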

/**
 * xe_ggtt_remove_bo - Remove a BO from the GGTT
 * @ggtt: the &xe_ggtt where node will be removed
 * @bo: the &xe_bo to be removed
 */
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
	if (XE_WARN_ON(!bo->ggtt_node))
		return;

	/* This BO is not currently in the GGTT */
	xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);

	xe_ggtt_node_remove(bo->ggtt_node,
			    bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
}

/**
 * xe_ggtt_largest_hole - Largest GGTT hole
 * @ggtt: the &xe_ggtt that will be inspected
 * @alignment: minimum alignment
 * @spare: If not NULL: in: desired memory size to be spared / out: Adjusted possible spare
 *
 * Return: size of the largest continuous GGTT region
 */
u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare)
{
	const struct drm_mm *mm = &ggtt->mm;
	const struct drm_mm_node *entry;
	u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile));
	u64 hole_start, hole_end, hole_size;
	u64 max_hole = 0;

	mutex_lock(&ggtt->lock);

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		hole_start = max(hole_start, hole_min_start);
		hole_start = ALIGN(hole_start, alignment);
		hole_end = ALIGN_DOWN(hole_end, alignment);
		if (hole_start >= hole_end)
			continue;
		hole_size = hole_end - hole_start;
		if (spare)
			*spare -= min3(*spare, hole_size, max_hole);
		max_hole = max(max_hole, hole_size);
	}

	mutex_unlock(&ggtt->lock);

	return max_hole;
}

#ifdef CONFIG_PCI_IOV
static u64 xe_encode_vfid_pte(u16 vfid)
{
	return FIELD_PREP(GGTT_PTE_VFID, vfid) | XE_PAGE_PRESENT;
}
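
/*
 * Worked example (illustrative, with GGTT_PTE_VFID covering PTE bits 11:2 as
 * described in the xe_ggtt_assign() kernel-doc below): xe_encode_vfid_pte(5)
 * yields (5 << 2) | XE_PAGE_PRESENT == 0x15.
 */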

static void xe_ggtt_assign_locked(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid)
{
	u64 start = node->start;
	u64 size = node->size;
	u64 end = start + size - 1;
	u64 pte = xe_encode_vfid_pte(vfid);

	lockdep_assert_held(&ggtt->lock);

	if (!drm_mm_node_allocated(node))
		return;

	while (start < end) {
		ggtt->pt_ops->ggtt_set_pte(ggtt, start, pte);
		start += XE_PAGE_SIZE;
	}

	xe_ggtt_invalidate(ggtt);
}

/**
 * xe_ggtt_assign - assign a GGTT region to the VF
 * @node: the &xe_ggtt_node to update
 * @vfid: the VF identifier
 *
 * This function is used by the PF driver to assign a GGTT region to the VF.
 * In addition to the PTE's VFID bits 11:2, the PRESENT bit 0 is also set, as
 * on some platforms VFs can't modify that either.
 */
void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid)
{
	mutex_lock(&node->ggtt->lock);
	xe_ggtt_assign_locked(node->ggtt, &node->base, vfid);
	mutex_unlock(&node->ggtt->lock);
}
#endif

/**
 * xe_ggtt_dump - Dump GGTT for debug
 * @ggtt: the &xe_ggtt to be dumped
 * @p: the &drm_printer helper handle to be used to dump the information
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)
{
	int err;

	err = mutex_lock_interruptible(&ggtt->lock);
	if (err)
		return err;

	drm_mm_print(&ggtt->mm, p);
	mutex_unlock(&ggtt->lock);
	return err;
}

/**
 * xe_ggtt_print_holes - Print holes
 * @ggtt: the &xe_ggtt to be inspected
 * @alignment: min alignment
 * @p: the &drm_printer
 *
 * Print GGTT ranges that are available and return total size available.
 *
 * Return: Total available size.
 */
u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer *p)
{
	const struct drm_mm *mm = &ggtt->mm;
	const struct drm_mm_node *entry;
	u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile));
	u64 hole_start, hole_end, hole_size;
	u64 total = 0;
	char buf[10];

	mutex_lock(&ggtt->lock);

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		hole_start = max(hole_start, hole_min_start);
		hole_start = ALIGN(hole_start, alignment);
		hole_end = ALIGN_DOWN(hole_end, alignment);
		if (hole_start >= hole_end)
			continue;
		hole_size = hole_end - hole_start;
		total += hole_size;

		string_get_size(hole_size, 1, STRING_UNITS_2, buf, sizeof(buf));
		drm_printf(p, "range:\t%#llx-%#llx\t(%s)\n",
			   hole_start, hole_end - 1, buf);
	}

	mutex_unlock(&ggtt->lock);

	return total;
}