1/*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30 * Dave Airlie
31 */
32
33#include <linux/dma-mapping.h>
34#include <linux/iommu.h>
35#include <linux/pagemap.h>
36#include <linux/sched/task.h>
37#include <linux/sched/mm.h>
38#include <linux/seq_file.h>
39#include <linux/slab.h>
40#include <linux/swap.h>
41#include <linux/swiotlb.h>
42#include <linux/dma-buf.h>
43#include <linux/sizes.h>
44
45#include <drm/ttm/ttm_bo_api.h>
46#include <drm/ttm/ttm_bo_driver.h>
47#include <drm/ttm/ttm_placement.h>
48#include <drm/ttm/ttm_range_manager.h>
49
50#include <drm/amdgpu_drm.h>
51
52#include "amdgpu.h"
53#include "amdgpu_object.h"
54#include "amdgpu_trace.h"
55#include "amdgpu_amdkfd.h"
56#include "amdgpu_sdma.h"
57#include "amdgpu_ras.h"
58#include "amdgpu_atomfirmware.h"
59#include "amdgpu_res_cursor.h"
60#include "bif/bif_4_1_d.h"
61
62#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
63
64static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
65 struct ttm_tt *ttm,
66 struct ttm_resource *bo_mem);
67static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
68 struct ttm_tt *ttm);
69
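/*
 * amdgpu_ttm_init_on_chip - Initialize a TTM range manager for one of the
 * on-chip memory domains (GDS, GWS, OA) with the given size in pages.
 */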
70static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
71 unsigned int type,
72 uint64_t size_in_page)
73{
74 return ttm_range_man_init(&adev->mman.bdev, type,
75 false, size_in_page);
76}
77
78/**
79 * amdgpu_evict_flags - Compute placement flags
80 *
81 * @bo: The buffer object to evict
82 * @placement: Possible destination(s) for evicted BO
83 *
84 * Fill in placement data when ttm_bo_evict() is called
85 */
86static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
87 struct ttm_placement *placement)
88{
89 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
90 struct amdgpu_bo *abo;
91 static const struct ttm_place placements = {
92 .fpfn = 0,
93 .lpfn = 0,
94 .mem_type = TTM_PL_SYSTEM,
95 .flags = 0
96 };
97
98 /* Don't handle scatter gather BOs */
99 if (bo->type == ttm_bo_type_sg) {
100 placement->num_placement = 0;
101 placement->num_busy_placement = 0;
102 return;
103 }
104
105 /* Object isn't an AMDGPU object so ignore */
106 if (!amdgpu_bo_is_amdgpu_bo(bo)) {
107 placement->placement = &placements;
108 placement->busy_placement = &placements;
109 placement->num_placement = 1;
110 placement->num_busy_placement = 1;
111 return;
112 }
113
114 abo = ttm_to_amdgpu_bo(bo);
115 if (abo->flags & AMDGPU_AMDKFD_CREATE_SVM_BO) {
116 struct dma_fence *fence;
117 struct dma_resv *resv = &bo->base._resv;
118
119 rcu_read_lock();
120 fence = rcu_dereference(resv->fence_excl);
121 if (fence && !fence->ops->signaled)
122 dma_fence_enable_sw_signaling(fence);
123
124 placement->num_placement = 0;
125 placement->num_busy_placement = 0;
126 rcu_read_unlock();
127 return;
128 }
129
130 switch (bo->resource->mem_type) {
131 case AMDGPU_PL_GDS:
132 case AMDGPU_PL_GWS:
133 case AMDGPU_PL_OA:
134 placement->num_placement = 0;
135 placement->num_busy_placement = 0;
136 return;
137
138 case TTM_PL_VRAM:
139 if (!adev->mman.buffer_funcs_enabled) {
140 /* Move to system memory */
141 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
142 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
143 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
144 amdgpu_bo_in_cpu_visible_vram(abo)) {
145
146 /* Try evicting to the CPU inaccessible part of VRAM
147 * first, but only set GTT as busy placement, so this
148 * BO will be evicted to GTT rather than causing other
149 * BOs to be evicted from VRAM
150 */
151 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
152 AMDGPU_GEM_DOMAIN_GTT);
153 abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
154 abo->placements[0].lpfn = 0;
155 abo->placement.busy_placement = &abo->placements[1];
156 abo->placement.num_busy_placement = 1;
157 } else {
158 /* Move to GTT memory */
159 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
160 }
161 break;
162 case TTM_PL_TT:
163 case AMDGPU_PL_PREEMPT:
164 default:
165 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
166 break;
167 }
168 *placement = abo->placement;
169}
170
171/**
172 * amdgpu_ttm_map_buffer - Map memory into the GART windows
173 * @bo: buffer object to map
174 * @mem: memory object to map
175 * @mm_cur: range to map
176 * @num_pages: number of pages to map
177 * @window: which GART window to use
178 * @ring: DMA ring to use for the copy
179 * @tmz: if we should set up a TMZ-enabled mapping
180 * @addr: resulting address inside the MC address space
181 *
182 * Setup one of the GART windows to access a specific piece of memory or return
183 * the physical address for local memory.
184 */
185static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
186 struct ttm_resource *mem,
187 struct amdgpu_res_cursor *mm_cur,
188 unsigned num_pages, unsigned window,
189 struct amdgpu_ring *ring, bool tmz,
190 uint64_t *addr)
191{
192 struct amdgpu_device *adev = ring->adev;
193 struct amdgpu_job *job;
194 unsigned num_dw, num_bytes;
195 struct dma_fence *fence;
196 uint64_t src_addr, dst_addr;
197 void *cpu_addr;
198 uint64_t flags;
199 unsigned int i;
200 int r;
201
202 BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
203 AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
204 BUG_ON(mem->mem_type == AMDGPU_PL_PREEMPT);
205
206 /* Map only what can't be accessed directly */
207 if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
208 *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
209 mm_cur->start;
210 return 0;
211 }
212
213 *addr = adev->gmc.gart_start;
214 *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
215 AMDGPU_GPU_PAGE_SIZE;
216 *addr += mm_cur->start & ~PAGE_MASK;
217
218 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
219 num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
220
221 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
222 AMDGPU_IB_POOL_DELAYED, &job);
223 if (r)
224 return r;
225
226 src_addr = num_dw * 4;
227 src_addr += job->ibs[0].gpu_addr;
228
229 dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
230 dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
231 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
232 dst_addr, num_bytes, false);
233
234 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
235 WARN_ON(job->ibs[0].length_dw > num_dw);
236
237 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
238 if (tmz)
239 flags |= AMDGPU_PTE_TMZ;
240
241 cpu_addr = &job->ibs[0].ptr[num_dw];
242
243 if (mem->mem_type == TTM_PL_TT) {
244 dma_addr_t *dma_addr;
245
246 dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
247 r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags,
248 cpu_addr);
249 if (r)
250 goto error_free;
251 } else {
252 dma_addr_t dma_address;
253
254 dma_address = mm_cur->start;
255 dma_address += adev->vm_manager.vram_base_offset;
256
257 for (i = 0; i < num_pages; ++i) {
258 r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
259 &dma_address, flags, cpu_addr);
260 if (r)
261 goto error_free;
262
263 dma_address += PAGE_SIZE;
264 }
265 }
266
267 r = amdgpu_job_submit(job, &adev->mman.entity,
268 AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
269 if (r)
270 goto error_free;
271
272 dma_fence_put(fence);
273
274 return r;
275
276error_free:
277 amdgpu_job_free(job);
278 return r;
279}
280
281/**
282 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
283 * @adev: amdgpu device
284 * @src: buffer/address where to read from
285 * @dst: buffer/address where to write to
286 * @size: number of bytes to copy
287 * @tmz: if a secure copy should be used
288 * @resv: resv object to sync to
289 * @f: Returns the last fence if multiple jobs are submitted.
290 *
291 * The function copies @size bytes from {src->mem + src->offset} to
292 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
293 * move and different for a BO to BO copy.
294 *
295 */
296int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
297 const struct amdgpu_copy_mem *src,
298 const struct amdgpu_copy_mem *dst,
299 uint64_t size, bool tmz,
300 struct dma_resv *resv,
301 struct dma_fence **f)
302{
303 const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
304 AMDGPU_GPU_PAGE_SIZE);
305
306 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
307 struct amdgpu_res_cursor src_mm, dst_mm;
308 struct dma_fence *fence = NULL;
309 int r = 0;
310
311 if (!adev->mman.buffer_funcs_enabled) {
312 DRM_ERROR("Trying to move memory with ring turned off.\n");
313 return -EINVAL;
314 }
315
316 amdgpu_res_first(src->mem, src->offset, size, &src_mm);
317 amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
318
319 mutex_lock(&adev->mman.gtt_window_lock);
320 while (src_mm.remaining) {
321 uint32_t src_page_offset = src_mm.start & ~PAGE_MASK;
322 uint32_t dst_page_offset = dst_mm.start & ~PAGE_MASK;
323 struct dma_fence *next;
324 uint32_t cur_size;
325 uint64_t from, to;
326
327 /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
328 * begins at an offset, then adjust the size accordingly
329 */
330 cur_size = max(src_page_offset, dst_page_offset);
331 cur_size = min(min3(src_mm.size, dst_mm.size, size),
332 (uint64_t)(GTT_MAX_BYTES - cur_size));
333
334 /* Map src to window 0 and dst to window 1. */
335 r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
336 PFN_UP(cur_size + src_page_offset),
337 0, ring, tmz, &from);
338 if (r)
339 goto error;
340
341 r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
342 PFN_UP(cur_size + dst_page_offset),
343 1, ring, tmz, &to);
344 if (r)
345 goto error;
346
347 r = amdgpu_copy_buffer(ring, from, to, cur_size,
348 resv, &next, false, true, tmz);
349 if (r)
350 goto error;
351
352 dma_fence_put(fence);
353 fence = next;
354
355 amdgpu_res_next(&src_mm, cur_size);
356 amdgpu_res_next(&dst_mm, cur_size);
357 }
358error:
359 mutex_unlock(&adev->mman.gtt_window_lock);
360 if (f)
361 *f = dma_fence_get(fence);
362 dma_fence_put(fence);
363 return r;
364}
365
366/*
367 * amdgpu_move_blit - Copy an entire buffer to another buffer
368 *
369 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
370 * help move buffers to and from VRAM.
371 */
372static int amdgpu_move_blit(struct ttm_buffer_object *bo,
373 bool evict,
374 struct ttm_resource *new_mem,
375 struct ttm_resource *old_mem)
376{
377 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
378 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
379 struct amdgpu_copy_mem src, dst;
380 struct dma_fence *fence = NULL;
381 int r;
382
383 src.bo = bo;
384 dst.bo = bo;
385 src.mem = old_mem;
386 dst.mem = new_mem;
387 src.offset = 0;
388 dst.offset = 0;
389
390 r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
391 new_mem->num_pages << PAGE_SHIFT,
392 amdgpu_bo_encrypted(abo),
393 bo->base.resv, &fence);
394 if (r)
395 goto error;
396
397 /* clear the space being freed */
398 if (old_mem->mem_type == TTM_PL_VRAM &&
399 (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
400 struct dma_fence *wipe_fence = NULL;
401
402 r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
403 NULL, &wipe_fence);
404 if (r) {
405 goto error;
406 } else if (wipe_fence) {
407 dma_fence_put(fence);
408 fence = wipe_fence;
409 }
410 }
411
412 /* Always block for VM page tables before committing the new location */
413 if (bo->type == ttm_bo_type_kernel)
414 r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
415 else
416 r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
417 dma_fence_put(fence);
418 return r;
419
420error:
421 if (fence)
422 dma_fence_wait(fence, false);
423 dma_fence_put(fence);
424 return r;
425}
426
427/*
428 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
429 *
430 * Called by amdgpu_bo_move()
431 */
432static bool amdgpu_mem_visible(struct amdgpu_device *adev,
433 struct ttm_resource *mem)
434{
435 uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
436 struct amdgpu_res_cursor cursor;
437
438 if (mem->mem_type == TTM_PL_SYSTEM ||
439 mem->mem_type == TTM_PL_TT)
440 return true;
441 if (mem->mem_type != TTM_PL_VRAM)
442 return false;
443
444 amdgpu_res_first(mem, 0, mem_size, &cursor);
445
446 /* ttm_resource_ioremap only supports contiguous memory */
447 if (cursor.size != mem_size)
448 return false;
449
450 return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
451}
452
453/*
454 * amdgpu_bo_move - Move a buffer object to a new memory location
455 *
456 * Called by ttm_bo_handle_move_mem()
457 */
458static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
459 struct ttm_operation_ctx *ctx,
460 struct ttm_resource *new_mem,
461 struct ttm_place *hop)
462{
463 struct amdgpu_device *adev;
464 struct amdgpu_bo *abo;
465 struct ttm_resource *old_mem = bo->resource;
466 int r;
467
468 if (new_mem->mem_type == TTM_PL_TT ||
469 new_mem->mem_type == AMDGPU_PL_PREEMPT) {
470 r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
471 if (r)
472 return r;
473 }
474
475 /* Can't move a pinned BO */
476 abo = ttm_to_amdgpu_bo(bo);
477 if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
478 return -EINVAL;
479
480 adev = amdgpu_ttm_adev(bo->bdev);
481
482 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
483 ttm_bo_move_null(bo, new_mem);
484 goto out;
485 }
486 if (old_mem->mem_type == TTM_PL_SYSTEM &&
487 (new_mem->mem_type == TTM_PL_TT ||
488 new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
489 ttm_bo_move_null(bo, new_mem);
490 goto out;
491 }
492 if ((old_mem->mem_type == TTM_PL_TT ||
493 old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
494 new_mem->mem_type == TTM_PL_SYSTEM) {
495 r = ttm_bo_wait_ctx(bo, ctx);
496 if (r)
497 return r;
498
499 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
500 ttm_resource_free(bo, &bo->resource);
501 ttm_bo_assign_mem(bo, new_mem);
502 goto out;
503 }
504
505 if (old_mem->mem_type == AMDGPU_PL_GDS ||
506 old_mem->mem_type == AMDGPU_PL_GWS ||
507 old_mem->mem_type == AMDGPU_PL_OA ||
508 new_mem->mem_type == AMDGPU_PL_GDS ||
509 new_mem->mem_type == AMDGPU_PL_GWS ||
510 new_mem->mem_type == AMDGPU_PL_OA) {
511 /* Nothing to save here */
512 ttm_bo_move_null(bo, new_mem);
513 goto out;
514 }
515
516 if (bo->type == ttm_bo_type_device &&
517 new_mem->mem_type == TTM_PL_VRAM &&
518 old_mem->mem_type != TTM_PL_VRAM) {
519 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
520 * accesses the BO after it's moved.
521 */
522 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
523 }
524
525 if (adev->mman.buffer_funcs_enabled) {
526 if (((old_mem->mem_type == TTM_PL_SYSTEM &&
527 new_mem->mem_type == TTM_PL_VRAM) ||
528 (old_mem->mem_type == TTM_PL_VRAM &&
529 new_mem->mem_type == TTM_PL_SYSTEM))) {
530 hop->fpfn = 0;
531 hop->lpfn = 0;
532 hop->mem_type = TTM_PL_TT;
533 hop->flags = 0;
534 return -EMULTIHOP;
535 }
536
537 r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
538 } else {
539 r = -ENODEV;
540 }
541
542 if (r) {
543 /* Check that all memory is CPU accessible */
544 if (!amdgpu_mem_visible(adev, old_mem) ||
545 !amdgpu_mem_visible(adev, new_mem)) {
546 pr_err("Move buffer fallback to memcpy unavailable\n");
547 return r;
548 }
549
550 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
551 if (r)
552 return r;
553 }
554
555out:
556 /* update statistics */
557 atomic64_add(bo->base.size, &adev->num_bytes_moved);
558 amdgpu_bo_move_notify(bo, evict, new_mem);
559 return 0;
560}
561
562/*
563 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
564 *
565 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
566 */
567static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
568 struct ttm_resource *mem)
569{
570 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
571 size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
572
573 switch (mem->mem_type) {
574 case TTM_PL_SYSTEM:
575 /* system memory */
576 return 0;
577 case TTM_PL_TT:
578 case AMDGPU_PL_PREEMPT:
579 break;
580 case TTM_PL_VRAM:
581 mem->bus.offset = mem->start << PAGE_SHIFT;
582 /* check if it's visible */
583 if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
584 return -EINVAL;
585
586 if (adev->mman.aper_base_kaddr &&
587 mem->placement & TTM_PL_FLAG_CONTIGUOUS)
588 mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
589 mem->bus.offset;
590
591 mem->bus.offset += adev->gmc.aper_base;
592 mem->bus.is_iomem = true;
593 break;
594 default:
595 return -EINVAL;
596 }
597 return 0;
598}
599
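/*
 * amdgpu_ttm_io_mem_pfn - Translate a BO page offset into an aperture PFN
 *
 * Walks the BO's resource to find the VRAM offset backing @page_offset and
 * returns the corresponding page frame number inside the BAR aperture.
 */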
600static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
601 unsigned long page_offset)
602{
603 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
604 struct amdgpu_res_cursor cursor;
605
606 amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
607 &cursor);
608 return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
609}
610
611/**
612 * amdgpu_ttm_domain_start - Returns GPU start address
613 * @adev: amdgpu device object
614 * @type: type of the memory
615 *
616 * Returns:
617 * GPU start address of a memory domain
618 */
619
620uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
621{
622 switch (type) {
623 case TTM_PL_TT:
624 return adev->gmc.gart_start;
625 case TTM_PL_VRAM:
626 return adev->gmc.vram_start;
627 }
628
629 return 0;
630}
631
632/*
633 * TTM backend functions.
634 */
635struct amdgpu_ttm_tt {
636 struct ttm_tt ttm;
637 struct drm_gem_object *gobj;
638 u64 offset;
639 uint64_t userptr;
640 struct task_struct *usertask;
641 uint32_t userflags;
642 bool bound;
643#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
644 struct hmm_range *range;
645#endif
646};
647
648#ifdef CONFIG_DRM_AMDGPU_USERPTR
649/*
650 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
651 * memory and start HMM tracking CPU page table update
652 *
653 * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
654 * once afterwards to stop HMM tracking
655 */
656int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
657{
658 struct ttm_tt *ttm = bo->tbo.ttm;
659 struct amdgpu_ttm_tt *gtt = (void *)ttm;
660 unsigned long start = gtt->userptr;
661 struct vm_area_struct *vma;
662 struct mm_struct *mm;
663 bool readonly;
664 int r = 0;
665
666 mm = bo->notifier.mm;
667 if (unlikely(!mm)) {
668 DRM_DEBUG_DRIVER("BO is not registered?\n");
669 return -EFAULT;
670 }
671
672 /* Another get_user_pages is running at the same time?? */
673 if (WARN_ON(gtt->range))
674 return -EFAULT;
675
676 if (!mmget_not_zero(mm)) /* Happens during process shutdown */
677 return -ESRCH;
678
679 mmap_read_lock(mm);
680 vma = vma_lookup(mm, start);
681 if (unlikely(!vma)) {
682 r = -EFAULT;
683 goto out_unlock;
684 }
685 if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
686 vma->vm_file)) {
687 r = -EPERM;
688 goto out_unlock;
689 }
690
691 readonly = amdgpu_ttm_tt_is_readonly(ttm);
692 r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
693 ttm->num_pages, &gtt->range, readonly,
694 true, NULL);
695out_unlock:
696 mmap_read_unlock(mm);
697 mmput(mm);
698
699 return r;
700}
701
702/*
703 * amdgpu_ttm_tt_userptr_range_done - stop HMM track the CPU page table change
704 * Check if the pages backing this ttm range have been invalidated
705 *
706 * Returns: true if pages are still valid
707 */
708bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
709{
710 struct amdgpu_ttm_tt *gtt = (void *)ttm;
711 bool r = false;
712
713 if (!gtt || !gtt->userptr)
714 return false;
715
716 DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
717 gtt->userptr, ttm->num_pages);
718
719 WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
720 "No user pages to check\n");
721
722 if (gtt->range) {
723 /*
724 * FIXME: Must always hold notifier_lock for this, and must
725 * not ignore the return code.
726 */
727 r = amdgpu_hmm_range_get_pages_done(gtt->range);
728 gtt->range = NULL;
729 }
730
731 return !r;
732}
733#endif
734
735/*
736 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
737 *
738 * Called by amdgpu_cs_list_validate(). This creates the page list
739 * that backs user memory and will ultimately be mapped into the device
740 * address space.
741 */
742void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
743{
744 unsigned long i;
745
746 for (i = 0; i < ttm->num_pages; ++i)
747 ttm->pages[i] = pages ? pages[i] : NULL;
748}
749
750/*
751 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
752 *
753 * Called by amdgpu_ttm_backend_bind()
754 **/
755static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
756 struct ttm_tt *ttm)
757{
758 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
759 struct amdgpu_ttm_tt *gtt = (void *)ttm;
760 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
761 enum dma_data_direction direction = write ?
762 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
763 int r;
764
765 /* Allocate an SG array and squash pages into it */
766 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
767 (u64)ttm->num_pages << PAGE_SHIFT,
768 GFP_KERNEL);
769 if (r)
770 goto release_sg;
771
772 /* Map SG to device */
773 r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
774 if (r)
775 goto release_sg;
776
777 /* convert SG to linear array of pages and dma addresses */
778 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
779 ttm->num_pages);
780
781 return 0;
782
783release_sg:
784 kfree(ttm->sg);
785 ttm->sg = NULL;
786 return r;
787}
788
789/*
790 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
791 */
792static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
793 struct ttm_tt *ttm)
794{
795 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
796 struct amdgpu_ttm_tt *gtt = (void *)ttm;
797 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
798 enum dma_data_direction direction = write ?
799 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
800
801 /* double check that we don't free the table twice */
802 if (!ttm->sg || !ttm->sg->sgl)
803 return;
804
805 /* unmap the pages mapped to the device */
806 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
807 sg_free_table(ttm->sg);
808
809#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
810 if (gtt->range) {
811 unsigned long i;
812
813 for (i = 0; i < ttm->num_pages; i++) {
814 if (ttm->pages[i] !=
815 hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
816 break;
817 }
818
819 WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
820 }
821#endif
822}
823
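/*
 * amdgpu_ttm_gart_bind - Write a ttm_tt's DMA addresses into the GART
 *
 * Adds the TMZ flag for encrypted BOs. For GFX9 CP MQD BOs the first page
 * keeps the default UC memory type while the remaining pages are bound
 * with the NC memory type.
 */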
824static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
825 struct ttm_buffer_object *tbo,
826 uint64_t flags)
827{
828 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
829 struct ttm_tt *ttm = tbo->ttm;
830 struct amdgpu_ttm_tt *gtt = (void *)ttm;
831 int r;
832
833 if (amdgpu_bo_encrypted(abo))
834 flags |= AMDGPU_PTE_TMZ;
835
836 if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
837 uint64_t page_idx = 1;
838
839 r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
840 gtt->ttm.dma_address, flags);
841 if (r)
842 goto gart_bind_fail;
843
844 /* The memory type of the first page defaults to UC. Now
845 * modify the memory type to NC from the second page of
846 * the BO onward.
847 */
848 flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
849 flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
850
851 r = amdgpu_gart_bind(adev,
852 gtt->offset + (page_idx << PAGE_SHIFT),
853 ttm->num_pages - page_idx,
854 &(gtt->ttm.dma_address[page_idx]), flags);
855 } else {
856 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
857 gtt->ttm.dma_address, flags);
858 }
859
860gart_bind_fail:
861 if (r)
862 DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
863 ttm->num_pages, gtt->offset);
864
865 return r;
866}
867
868/*
869 * amdgpu_ttm_backend_bind - Bind GTT memory
870 *
871 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
872 * This handles binding GTT memory to the device address space.
873 */
874static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
875 struct ttm_tt *ttm,
876 struct ttm_resource *bo_mem)
877{
878 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
879 struct amdgpu_ttm_tt *gtt = (void*)ttm;
880 uint64_t flags;
881 int r = 0;
882
883 if (!bo_mem)
884 return -EINVAL;
885
886 if (gtt->bound)
887 return 0;
888
889 if (gtt->userptr) {
890 r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
891 if (r) {
892 DRM_ERROR("failed to pin userptr\n");
893 return r;
894 }
895 } else if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
896 if (!ttm->sg) {
897 struct dma_buf_attachment *attach;
898 struct sg_table *sgt;
899
900 attach = gtt->gobj->import_attach;
901 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
902 if (IS_ERR(sgt))
903 return PTR_ERR(sgt);
904
905 ttm->sg = sgt;
906 }
907
908 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
909 ttm->num_pages);
910 }
911
912 if (!ttm->num_pages) {
913 WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
914 ttm->num_pages, bo_mem, ttm);
915 }
916
917 if (bo_mem->mem_type == AMDGPU_PL_GDS ||
918 bo_mem->mem_type == AMDGPU_PL_GWS ||
919 bo_mem->mem_type == AMDGPU_PL_OA)
920 return -EINVAL;
921
922 if (bo_mem->mem_type != TTM_PL_TT ||
923 !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
924 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
925 return 0;
926 }
927
928 /* compute PTE flags relevant to this BO memory */
929 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
930
931 /* bind pages into GART page tables */
932 gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
933 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
934 gtt->ttm.dma_address, flags);
935
936 if (r)
937 DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
938 ttm->num_pages, gtt->offset);
939 gtt->bound = true;
940 return r;
941}
942
943/*
944 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
945 * through AGP or GART aperture.
946 *
947 * If bo is accessible through AGP aperture, then use AGP aperture
948 * to access bo; otherwise allocate logical space in GART aperture
949 * and map bo to GART aperture.
950 */
951int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
952{
953 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
954 struct ttm_operation_ctx ctx = { false, false };
955 struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
956 struct ttm_placement placement;
957 struct ttm_place placements;
958 struct ttm_resource *tmp;
959 uint64_t addr, flags;
960 int r;
961
962 if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
963 return 0;
964
965 addr = amdgpu_gmc_agp_addr(bo);
966 if (addr != AMDGPU_BO_INVALID_OFFSET) {
967 bo->resource->start = addr >> PAGE_SHIFT;
968 return 0;
969 }
970
971 /* allocate GART space */
972 placement.num_placement = 1;
973 placement.placement = &placements;
974 placement.num_busy_placement = 1;
975 placement.busy_placement = &placements;
976 placements.fpfn = 0;
977 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
978 placements.mem_type = TTM_PL_TT;
979 placements.flags = bo->resource->placement;
980
981 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
982 if (unlikely(r))
983 return r;
984
985 /* compute PTE flags for this buffer object */
986 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
987
988 /* Bind pages */
989 gtt->offset = (u64)tmp->start << PAGE_SHIFT;
990 r = amdgpu_ttm_gart_bind(adev, bo, flags);
991 if (unlikely(r)) {
992 ttm_resource_free(bo, &tmp);
993 return r;
994 }
995
996 amdgpu_gart_invalidate_tlb(adev);
997 ttm_resource_free(bo, &bo->resource);
998 ttm_bo_assign_mem(bo, tmp);
999
1000 return 0;
1001}
1002
1003/*
1004 * amdgpu_ttm_recover_gart - Rebind GTT pages
1005 *
1006 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
1007 * rebind GTT pages during a GPU reset.
1008 */
1009int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
1010{
1011 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1012 uint64_t flags;
1013 int r;
1014
1015 if (!tbo->ttm)
1016 return 0;
1017
1018 flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
1019 r = amdgpu_ttm_gart_bind(adev, tbo, flags);
1020
1021 return r;
1022}
1023
1024/*
1025 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
1026 *
1027 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
1028 * ttm_tt_destroy().
1029 */
1030static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
1031 struct ttm_tt *ttm)
1032{
1033 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1034 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1035 int r;
1036
1037 /* if the pages have userptr pinning then clear that first */
1038 if (gtt->userptr) {
1039 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
1040 } else if (ttm->sg && gtt->gobj->import_attach) {
1041 struct dma_buf_attachment *attach;
1042
1043 attach = gtt->gobj->import_attach;
1044 dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1045 ttm->sg = NULL;
1046 }
1047
1048 if (!gtt->bound)
1049 return;
1050
1051 if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1052 return;
1053
1054 /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1055 r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1056 if (r)
1057 DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
1058 gtt->ttm.num_pages, gtt->offset);
1059 gtt->bound = false;
1060}
1061
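/*
 * amdgpu_ttm_backend_destroy - Tear down a ttm_tt
 *
 * Unbinds the pages, drops the reference on the userptr task if any and
 * frees the amdgpu_ttm_tt structure.
 */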
1062static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
1063 struct ttm_tt *ttm)
1064{
1065 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1066
1067 amdgpu_ttm_backend_unbind(bdev, ttm);
1068 ttm_tt_destroy_common(bdev, ttm);
1069 if (gtt->usertask)
1070 put_task_struct(gtt->usertask);
1071
1072 ttm_tt_fini(&gtt->ttm);
1073 kfree(gtt);
1074}
1075
1076/**
1077 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1078 *
1079 * @bo: The buffer object to create a GTT ttm_tt object around
1080 * @page_flags: Page flags to be added to the ttm_tt object
1081 *
1082 * Called by ttm_tt_create().
1083 */
1084static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1085 uint32_t page_flags)
1086{
1087 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1088 struct amdgpu_ttm_tt *gtt;
1089 enum ttm_caching caching;
1090
1091 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1092 if (gtt == NULL) {
1093 return NULL;
1094 }
1095 gtt->gobj = &bo->base;
1096
1097 if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1098 caching = ttm_write_combined;
1099 else
1100 caching = ttm_cached;
1101
1102 /* allocate space for the uninitialized page entries */
1103 if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
1104 kfree(gtt);
1105 return NULL;
1106 }
1107 return &gtt->ttm;
1108}
1109
1110/*
1111 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1112 *
1113 * Map the pages of a ttm_tt object to an address space visible
1114 * to the underlying device.
1115 */
1116static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
1117 struct ttm_tt *ttm,
1118 struct ttm_operation_ctx *ctx)
1119{
1120 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1121 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1122
1123 /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1124 if (gtt && gtt->userptr) {
1125 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1126 if (!ttm->sg)
1127 return -ENOMEM;
1128 return 0;
1129 }
1130
1131 if (ttm->page_flags & TTM_PAGE_FLAG_SG)
1132 return 0;
1133
1134 return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
1135}
1136
1137/*
1138 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1139 *
1140 * Unmaps pages of a ttm_tt object from the device address space and
1141 * unpopulates the page array backing it.
1142 */
1143static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
1144 struct ttm_tt *ttm)
1145{
1146 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1147 struct amdgpu_device *adev;
1148
1149 if (gtt && gtt->userptr) {
1150 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1151 kfree(ttm->sg);
1152 ttm->sg = NULL;
1153 return;
1154 }
1155
1156 if (ttm->page_flags & TTM_PAGE_FLAG_SG)
1157 return;
1158
1159 adev = amdgpu_ttm_adev(bdev);
1160 return ttm_pool_free(&adev->mman.bdev.pool, ttm);
1161}
1162
1163/**
1164 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1165 * task
1166 *
1167 * @bo: The ttm_buffer_object to bind this userptr to
1168 * @addr: The address in the current tasks VM space to use
1169 * @flags: Requirements of userptr object.
1170 *
1171 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
1172 * to current task
1173 */
1174int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
1175 uint64_t addr, uint32_t flags)
1176{
1177 struct amdgpu_ttm_tt *gtt;
1178
1179 if (!bo->ttm) {
1180 /* TODO: We want a separate TTM object type for userptrs */
1181 bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1182 if (bo->ttm == NULL)
1183 return -ENOMEM;
1184 }
1185
1186 /* Set TTM_PAGE_FLAG_SG before populate but after create. */
1187 bo->ttm->page_flags |= TTM_PAGE_FLAG_SG;
1188
1189 gtt = (void *)bo->ttm;
1190 gtt->userptr = addr;
1191 gtt->userflags = flags;
1192
1193 if (gtt->usertask)
1194 put_task_struct(gtt->usertask);
1195 gtt->usertask = current->group_leader;
1196 get_task_struct(gtt->usertask);
1197
1198 return 0;
1199}
1200
1201/*
1202 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1203 */
1204struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1205{
1206 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1207
1208 if (gtt == NULL)
1209 return NULL;
1210
1211 if (gtt->usertask == NULL)
1212 return NULL;
1213
1214 return gtt->usertask->mm;
1215}
1216
1217/*
1218 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an
1219 * address range for the current task.
1220 *
1221 */
1222bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1223 unsigned long end)
1224{
1225 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1226 unsigned long size;
1227
1228 if (gtt == NULL || !gtt->userptr)
1229 return false;
1230
1231 /* Return false if no part of the ttm_tt object lies within
1232 * the range
1233 */
1234 size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
1235 if (gtt->userptr > end || gtt->userptr + size <= start)
1236 return false;
1237
1238 return true;
1239}
1240
1241/*
1242 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
1243 */
1244bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1245{
1246 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1247
1248 if (gtt == NULL || !gtt->userptr)
1249 return false;
1250
1251 return true;
1252}
1253
1254/*
1255 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1256 */
1257bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1258{
1259 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1260
1261 if (gtt == NULL)
1262 return false;
1263
1264 return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1265}
1266
1267/**
1268 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1269 *
1270 * @ttm: The ttm_tt object to compute the flags for
1271 * @mem: The memory registry backing this ttm_tt object
1272 *
1273 * Figure out the flags to use for a VM PDE (Page Directory Entry).
1274 */
1275uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1276{
1277 uint64_t flags = 0;
1278
1279 if (mem && mem->mem_type != TTM_PL_SYSTEM)
1280 flags |= AMDGPU_PTE_VALID;
1281
1282 if (mem && (mem->mem_type == TTM_PL_TT ||
1283 mem->mem_type == AMDGPU_PL_PREEMPT)) {
1284 flags |= AMDGPU_PTE_SYSTEM;
1285
1286 if (ttm->caching == ttm_cached)
1287 flags |= AMDGPU_PTE_SNOOPED;
1288 }
1289
1290 if (mem && mem->mem_type == TTM_PL_VRAM &&
1291 mem->bus.caching == ttm_cached)
1292 flags |= AMDGPU_PTE_SNOOPED;
1293
1294 return flags;
1295}
1296
1297/**
1298 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1299 *
1300 * @adev: amdgpu_device pointer
1301 * @ttm: The ttm_tt object to compute the flags for
1302 * @mem: The memory registry backing this ttm_tt object
1303 *
1304 * Figure out the flags to use for a VM PTE (Page Table Entry).
1305 */
1306uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1307 struct ttm_resource *mem)
1308{
1309 uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1310
1311 flags |= adev->gart.gart_pte_flags;
1312 flags |= AMDGPU_PTE_READABLE;
1313
1314 if (!amdgpu_ttm_tt_is_readonly(ttm))
1315 flags |= AMDGPU_PTE_WRITEABLE;
1316
1317 return flags;
1318}
1319
1320/*
1321 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1322 * object.
1323 *
1324 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1325 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1326 * it can find space for a new object and by ttm_bo_force_list_clean() which is
1327 * used to clean out a memory space.
1328 */
1329static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1330 const struct ttm_place *place)
1331{
1332 unsigned long num_pages = bo->resource->num_pages;
1333 struct amdgpu_res_cursor cursor;
1334 struct dma_resv_list *flist;
1335 struct dma_fence *f;
1336 int i;
1337
1338 /* Swapout? */
1339 if (bo->resource->mem_type == TTM_PL_SYSTEM)
1340 return true;
1341
1342 if (bo->type == ttm_bo_type_kernel &&
1343 !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1344 return false;
1345
1346 /* If bo is a KFD BO, check if the bo belongs to the current process.
1347 * If true, then return false as any KFD process needs all its BOs to
1348 * be resident to run successfully
1349 */
1350 flist = dma_resv_shared_list(bo->base.resv);
1351 if (flist) {
1352 for (i = 0; i < flist->shared_count; ++i) {
1353 f = rcu_dereference_protected(flist->shared[i],
1354 dma_resv_held(bo->base.resv));
1355 if (amdkfd_fence_check_mm(f, current->mm))
1356 return false;
1357 }
1358 }
1359
1360 switch (bo->resource->mem_type) {
1361 case AMDGPU_PL_PREEMPT:
1362 /* Preemptible BOs don't own system resources managed by the
1363 * driver (pages, VRAM, GART space). They point to resources
1364 * owned by someone else (e.g. pageable memory in user mode
1365 * or a DMABuf). They are used in a preemptible context so we
1366 * can guarantee no deadlocks and good QoS in case of MMU
1367 * notifiers or DMABuf move notifiers from the resource owner.
1368 */
1369 return false;
1370 case TTM_PL_TT:
1371 if (amdgpu_bo_is_amdgpu_bo(bo) &&
1372 amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1373 return false;
1374 return true;
1375
1376 case TTM_PL_VRAM:
1377 /* Check each drm MM node individually */
1378 amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
1379 &cursor);
1380 while (cursor.remaining) {
1381 if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
1382 && !(place->lpfn &&
1383 place->lpfn <= PFN_DOWN(cursor.start)))
1384 return true;
1385
1386 amdgpu_res_next(&cursor, cursor.size);
1387 }
1388 return false;
1389
1390 default:
1391 break;
1392 }
1393
1394 return ttm_bo_eviction_valuable(bo, place);
1395}
1396
1397/**
1398 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
1399 *
1400 * @bo: The buffer object to read/write
1401 * @offset: Offset into buffer object
1402 * @buf: Secondary buffer to write/read from
1403 * @len: Length in bytes of access
1404 * @write: true if writing
1405 *
1406 * This is used to access VRAM that backs a buffer object via MMIO
1407 * access for debugging purposes.
1408 */
1409static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1410 unsigned long offset, void *buf, int len,
1411 int write)
1412{
1413 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1414 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1415 struct amdgpu_res_cursor cursor;
1416 unsigned long flags;
1417 uint32_t value = 0;
1418 int ret = 0;
1419
1420 if (bo->resource->mem_type != TTM_PL_VRAM)
1421 return -EIO;
1422
1423 amdgpu_res_first(bo->resource, offset, len, &cursor);
1424 while (cursor.remaining) {
1425 uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
1426 uint64_t bytes = 4 - (cursor.start & 3);
1427 uint32_t shift = (cursor.start & 3) * 8;
1428 uint32_t mask = 0xffffffff << shift;
1429
1430 if (cursor.size < bytes) {
1431 mask &= 0xffffffff >> (bytes - cursor.size) * 8;
1432 bytes = cursor.size;
1433 }
1434
1435 if (mask != 0xffffffff) {
1436 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1437 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1438 WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1439 value = RREG32_NO_KIQ(mmMM_DATA);
1440 if (write) {
1441 value &= ~mask;
1442 value |= (*(uint32_t *)buf << shift) & mask;
1443 WREG32_NO_KIQ(mmMM_DATA, value);
1444 }
1445 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1446 if (!write) {
1447 value = (value & mask) >> shift;
1448 memcpy(buf, &value, bytes);
1449 }
1450 } else {
1451 bytes = cursor.size & ~0x3ULL;
1452 amdgpu_device_vram_access(adev, cursor.start,
1453 (uint32_t *)buf, bytes,
1454 write);
1455 }
1456
1457 ret += bytes;
1458 buf = (uint8_t *)buf + bytes;
1459 amdgpu_res_next(&cursor, bytes);
1460 }
1461
1462 return ret;
1463}
1464
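/*
 * amdgpu_bo_delete_mem_notify - Called when a BO loses its backing resource;
 * forwards to amdgpu_bo_move_notify() with a NULL new placement.
 */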
1465static void
1466amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
1467{
1468 amdgpu_bo_move_notify(bo, false, NULL);
1469}
1470
1471static struct ttm_device_funcs amdgpu_bo_driver = {
1472 .ttm_tt_create = &amdgpu_ttm_tt_create,
1473 .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1474 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1475 .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
1476 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1477 .evict_flags = &amdgpu_evict_flags,
1478 .move = &amdgpu_bo_move,
1479 .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
1480 .release_notify = &amdgpu_bo_release_notify,
1481 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1482 .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1483 .access_memory = &amdgpu_ttm_access_memory,
1484 .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
1485};
1486
1487/*
1488 * Firmware Reservation functions
1489 */
1490/**
1491 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1492 *
1493 * @adev: amdgpu_device pointer
1494 *
1495 * free fw reserved vram if it has been reserved.
1496 */
1497static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1498{
1499 amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
1500 NULL, &adev->mman.fw_vram_usage_va);
1501}
1502
1503/**
1504 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1505 *
1506 * @adev: amdgpu_device pointer
1507 *
1508 * create bo vram reservation from fw.
1509 */
1510static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1511{
1512 uint64_t vram_size = adev->gmc.visible_vram_size;
1513
1514 adev->mman.fw_vram_usage_va = NULL;
1515 adev->mman.fw_vram_usage_reserved_bo = NULL;
1516
1517 if (adev->mman.fw_vram_usage_size == 0 ||
1518 adev->mman.fw_vram_usage_size > vram_size)
1519 return 0;
1520
1521 return amdgpu_bo_create_kernel_at(adev,
1522 adev->mman.fw_vram_usage_start_offset,
1523 adev->mman.fw_vram_usage_size,
1524 AMDGPU_GEM_DOMAIN_VRAM,
1525 &adev->mman.fw_vram_usage_reserved_bo,
1526 &adev->mman.fw_vram_usage_va);
1527}
1528
1529/*
1530 * Memory training reservation functions
1531 */
1532
1533/**
1534 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1535 *
1536 * @adev: amdgpu_device pointer
1537 *
1538 * free memory training reserved vram if it has been reserved.
1539 */
1540static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1541{
1542 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1543
1544 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1545 amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1546 ctx->c2p_bo = NULL;
1547
1548 return 0;
1549}
1550
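/*
 * amdgpu_ttm_training_data_block_init - Compute the VRAM offsets used for
 * GDDR6 memory training: the C2P buffer placed just below the discovery TMR
 * and the P2C buffer at the fixed training offset from the end of VRAM.
 */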
1551static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
1552{
1553 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1554
1555 memset(ctx, 0, sizeof(*ctx));
1556
1557 ctx->c2p_train_data_offset =
1558 ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
1559 ctx->p2c_train_data_offset =
1560 (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1561 ctx->train_data_size =
1562 GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1563
1564 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1565 ctx->train_data_size,
1566 ctx->p2c_train_data_offset,
1567 ctx->c2p_train_data_offset);
1568}
1569
1570/*
1571 * reserve TMR memory at the top of VRAM which holds
1572 * IP Discovery data and is protected by PSP.
1573 */
1574static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
1575{
1576 int ret;
1577 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1578 bool mem_train_support = false;
1579
1580 if (!amdgpu_sriov_vf(adev)) {
1581 if (amdgpu_atomfirmware_mem_training_supported(adev))
1582 mem_train_support = true;
1583 else
1584 DRM_DEBUG("memory training is not supported!\n");
1585 }
1586
1587 /*
1588 * Query the reserved tmr size through atom firmwareinfo for Sienna_Cichlid and onwards for all
1589 * the use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.)
1590 *
1591 * Otherwise, fall back to the legacy approach to check and reserve tmr blocks for IP
1592 * discovery data and G6 memory training data respectively
1593 */
1594 adev->mman.discovery_tmr_size =
1595 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1596 if (!adev->mman.discovery_tmr_size)
1597 adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
1598
1599 if (mem_train_support) {
1600 /* reserve vram for mem train according to TMR location */
1601 amdgpu_ttm_training_data_block_init(adev);
1602 ret = amdgpu_bo_create_kernel_at(adev,
1603 ctx->c2p_train_data_offset,
1604 ctx->train_data_size,
1605 AMDGPU_GEM_DOMAIN_VRAM,
1606 &ctx->c2p_bo,
1607 NULL);
1608 if (ret) {
1609 DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
1610 amdgpu_ttm_training_reserve_vram_fini(adev);
1611 return ret;
1612 }
1613 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
1614 }
1615
1616 ret = amdgpu_bo_create_kernel_at(adev,
1617 adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
1618 adev->mman.discovery_tmr_size,
1619 AMDGPU_GEM_DOMAIN_VRAM,
1620 &adev->mman.discovery_memory,
1621 NULL);
1622 if (ret) {
1623 DRM_ERROR("alloc tmr failed(%d)!\n", ret);
1624 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1625 return ret;
1626 }
1627
1628 return 0;
1629}
1630
1631/*
1632 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1633 * gtt/vram related fields.
1634 *
1635 * This initializes all of the memory space pools that the TTM layer
1636 * will need such as the GTT space (system memory mapped to the device),
1637 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1638 * can be mapped per VMID.
1639 */
1640int amdgpu_ttm_init(struct amdgpu_device *adev)
1641{
1642 uint64_t gtt_size;
1643 int r;
1644 u64 vis_vram_limit;
1645
1646 mutex_init(&adev->mman.gtt_window_lock);
1647
1648 /* No other users of the address space, so set it to 0 */
1649 r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
1650 adev_to_drm(adev)->anon_inode->i_mapping,
1651 adev_to_drm(adev)->vma_offset_manager,
1652 adev->need_swiotlb,
1653 dma_addressing_limited(adev->dev));
1654 if (r) {
1655 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1656 return r;
1657 }
1658 adev->mman.initialized = true;
1659
1660 /* Initialize VRAM pool with all of VRAM divided into pages */
1661 r = amdgpu_vram_mgr_init(adev);
1662 if (r) {
1663 DRM_ERROR("Failed initializing VRAM heap.\n");
1664 return r;
1665 }
1666
1667 /* Reduce size of CPU-visible VRAM if requested */
1668 vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1669 if (amdgpu_vis_vram_limit > 0 &&
1670 vis_vram_limit <= adev->gmc.visible_vram_size)
1671 adev->gmc.visible_vram_size = vis_vram_limit;
1672
1673 /* Change the size here instead of the init above so only lpfn is affected */
1674 amdgpu_ttm_set_buffer_funcs_status(adev, false);
1675#ifdef CONFIG_64BIT
1676#ifdef CONFIG_X86
1677 if (adev->gmc.xgmi.connected_to_cpu)
1678 adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
1679 adev->gmc.visible_vram_size);
1680
1681 else
1682#endif
1683 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1684 adev->gmc.visible_vram_size);
1685#endif
1686
1687 /*
1688 * The reserved VRAM for firmware must be pinned to the specified
1689 * place in VRAM, so reserve it early.
1690 */
1691 r = amdgpu_ttm_fw_reserve_vram_init(adev);
1692 if (r) {
1693 return r;
1694 }
1695
1696 /*
1697 * only NAVI10 and onward ASICs support IP discovery.
1698 * If IP discovery is enabled, a block of memory should be
1699 * reserved for IP discovery.
1700 */
1701 if (adev->mman.discovery_bin) {
1702 r = amdgpu_ttm_reserve_tmr(adev);
1703 if (r)
1704 return r;
1705 }
1706
1707 /* allocate memory as required for VGA
1708 * This is used for VGA emulation and pre-OS scanout buffers to
1709 * avoid display artifacts while transitioning between pre-OS
1710 * and driver. */
1711 r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
1712 AMDGPU_GEM_DOMAIN_VRAM,
1713 &adev->mman.stolen_vga_memory,
1714 NULL);
1715 if (r)
1716 return r;
1717 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
1718 adev->mman.stolen_extended_size,
1719 AMDGPU_GEM_DOMAIN_VRAM,
1720 &adev->mman.stolen_extended_memory,
1721 NULL);
1722 if (r)
1723 return r;
1724 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
1725 adev->mman.stolen_reserved_size,
1726 AMDGPU_GEM_DOMAIN_VRAM,
1727 &adev->mman.stolen_reserved_memory,
1728 NULL);
1729 if (r)
1730 return r;
1731
1732 DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1733 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1734
1735 /* Compute GTT size, either based on 3/4 of the RAM size
1736 * or whatever the user passed on module init */
1737 if (amdgpu_gtt_size == -1) {
1738 struct sysinfo si;
1739
1740 si_meminfo(&si);
1741 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1742 adev->gmc.mc_vram_size),
1743 ((uint64_t)si.totalram * si.mem_unit * 3/4));
1744 }
1745 else
1746 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
1747
1748 /* Initialize GTT memory pool */
1749 r = amdgpu_gtt_mgr_init(adev, gtt_size);
1750 if (r) {
1751 DRM_ERROR("Failed initializing GTT heap.\n");
1752 return r;
1753 }
1754 DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1755 (unsigned)(gtt_size / (1024 * 1024)));
1756
1757 /* Initialize preemptible memory pool */
1758 r = amdgpu_preempt_mgr_init(adev);
1759 if (r) {
1760 DRM_ERROR("Failed initializing PREEMPT heap.\n");
1761 return r;
1762 }
1763
1764 /* Initialize various on-chip memory pools */
1765 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
1766 if (r) {
1767 DRM_ERROR("Failed initializing GDS heap.\n");
1768 return r;
1769 }
1770
1771 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
1772 if (r) {
1773 DRM_ERROR("Failed initializing gws heap.\n");
1774 return r;
1775 }
1776
1777 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
1778 if (r) {
1779 DRM_ERROR("Failed initializing oa heap.\n");
1780 return r;
1781 }
1782
1783 return 0;
1784}
1785
1786/*
1787 * amdgpu_ttm_fini - De-initialize the TTM memory pools
1788 */
1789void amdgpu_ttm_fini(struct amdgpu_device *adev)
1790{
1791 if (!adev->mman.initialized)
1792 return;
1793
1794 amdgpu_ttm_training_reserve_vram_fini(adev);
1795 /* return the stolen vga memory back to VRAM */
1796 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
1797 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
1798 /* return the IP Discovery TMR memory back to VRAM */
1799 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1800 if (adev->mman.stolen_reserved_size)
1801 amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
1802 NULL, NULL);
1803 amdgpu_ttm_fw_reserve_vram_fini(adev);
1804
1805 amdgpu_vram_mgr_fini(adev);
1806 amdgpu_gtt_mgr_fini(adev);
1807 amdgpu_preempt_mgr_fini(adev);
1808 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
1809 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
1810 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
1811 ttm_device_fini(&adev->mman.bdev);
1812 adev->mman.initialized = false;
1813 DRM_INFO("amdgpu: ttm finalized\n");
1814}
1815
1816/**
1817 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1818 *
1819 * @adev: amdgpu_device pointer
1820 * @enable: true when we can use buffer functions.
1821 *
1822 * Enable/disable use of buffer functions during suspend/resume. This should
1823 * only be called at bootup or when userspace isn't running.
1824 */
1825void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1826{
1827 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
1828 uint64_t size;
1829 int r;
1830
1831 if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
1832 adev->mman.buffer_funcs_enabled == enable)
1833 return;
1834
1835 if (enable) {
1836 struct amdgpu_ring *ring;
1837 struct drm_gpu_scheduler *sched;
1838
1839 ring = adev->mman.buffer_funcs_ring;
1840 sched = &ring->sched;
1841 r = drm_sched_entity_init(&adev->mman.entity,
1842 DRM_SCHED_PRIORITY_KERNEL, &sched,
1843 1, NULL);
1844 if (r) {
1845 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
1846 r);
1847 return;
1848 }
1849 } else {
1850 drm_sched_entity_destroy(&adev->mman.entity);
1851 dma_fence_put(man->move);
1852 man->move = NULL;
1853 }
1854
1855 /* this just adjusts TTM size idea, which sets lpfn to the correct value */
1856 if (enable)
1857 size = adev->gmc.real_vram_size;
1858 else
1859 size = adev->gmc.visible_vram_size;
1860 man->size = size >> PAGE_SHIFT;
1861 adev->mman.buffer_funcs_enabled = enable;
1862}
1863
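/*
 * amdgpu_copy_buffer - Schedule an SDMA copy between two GPU addresses
 *
 * Splits @byte_count into chunks of at most copy_max_bytes, emits one copy
 * packet per chunk into a single IB and submits it either directly to the
 * ring or through the mman entity. The resulting fence is returned in
 * @fence.
 */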
1864int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
1865 uint64_t dst_offset, uint32_t byte_count,
1866 struct dma_resv *resv,
1867 struct dma_fence **fence, bool direct_submit,
1868 bool vm_needs_flush, bool tmz)
1869{
1870 enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
1871 AMDGPU_IB_POOL_DELAYED;
1872 struct amdgpu_device *adev = ring->adev;
1873 struct amdgpu_job *job;
1874
1875 uint32_t max_bytes;
1876 unsigned num_loops, num_dw;
1877 unsigned i;
1878 int r;
1879
1880 if (direct_submit && !ring->sched.ready) {
1881 DRM_ERROR("Trying to move memory with ring turned off.\n");
1882 return -EINVAL;
1883 }
1884
1885 max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
1886 num_loops = DIV_ROUND_UP(byte_count, max_bytes);
1887 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
1888
1889 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
1890 if (r)
1891 return r;
1892
1893 if (vm_needs_flush) {
1894 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
1895 adev->gmc.pdb0_bo : adev->gart.bo);
1896 job->vm_needs_flush = true;
1897 }
1898 if (resv) {
1899 r = amdgpu_sync_resv(adev, &job->sync, resv,
1900 AMDGPU_SYNC_ALWAYS,
1901 AMDGPU_FENCE_OWNER_UNDEFINED);
1902 if (r) {
1903 DRM_ERROR("sync failed (%d).\n", r);
1904 goto error_free;
1905 }
1906 }
1907
1908 for (i = 0; i < num_loops; i++) {
1909 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
1910
1911 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
1912 dst_offset, cur_size_in_bytes, tmz);
1913
1914 src_offset += cur_size_in_bytes;
1915 dst_offset += cur_size_in_bytes;
1916 byte_count -= cur_size_in_bytes;
1917 }
1918
1919 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1920 WARN_ON(job->ibs[0].length_dw > num_dw);
1921 if (direct_submit)
1922 r = amdgpu_job_submit_direct(job, ring, fence);
1923 else
1924 r = amdgpu_job_submit(job, &adev->mman.entity,
1925 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
1926 if (r)
1927 goto error_free;
1928
1929 return r;
1930
1931error_free:
1932 amdgpu_job_free(job);
1933 DRM_ERROR("Error scheduling IBs (%d)\n", r);
1934 return r;
1935}
1936
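/**
 * amdgpu_fill_buffer - fill a buffer object with a 32 bit value
 *
 * @bo: the buffer object to fill
 * @src_data: the 32 bit value to fill the buffer with
 * @resv: optional reservation object to synchronize with before the fill
 * @fence: returns the fence of the fill submission
 *
 * Walk the backing resource with a cursor, emit one fill packet per chunk of
 * at most fill_max_bytes and submit the job through the TTM entity. GTT
 * backed buffers are bound to GART first; preemptible buffers are rejected.
 * Returns 0 on success or a negative error code.
 */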
1937int amdgpu_fill_buffer(struct amdgpu_bo *bo,
1938 uint32_t src_data,
1939 struct dma_resv *resv,
1940 struct dma_fence **fence)
1941{
1942 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1943 uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
1944 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
1945
1946 struct amdgpu_res_cursor cursor;
1947 unsigned int num_loops, num_dw;
1948 uint64_t num_bytes;
1949
1950 struct amdgpu_job *job;
1951 int r;
1952
1953 if (!adev->mman.buffer_funcs_enabled) {
1954 DRM_ERROR("Trying to clear memory with ring turned off.\n");
1955 return -EINVAL;
1956 }
1957
1958 if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) {
1959 DRM_ERROR("Trying to clear preemptible memory.\n");
1960 return -EINVAL;
1961 }
1962
1963 if (bo->tbo.resource->mem_type == TTM_PL_TT) {
1964 r = amdgpu_ttm_alloc_gart(&bo->tbo);
1965 if (r)
1966 return r;
1967 }
1968
1969 num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT;
1970 num_loops = 0;
1971
1972 amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
1973 while (cursor.remaining) {
1974 num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
1975 amdgpu_res_next(&cursor, cursor.size);
1976 }
1977 num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
1978
1979 /* for IB padding */
1980 num_dw += 64;
1981
1982 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
1983 &job);
1984 if (r)
1985 return r;
1986
1987 if (resv) {
1988 r = amdgpu_sync_resv(adev, &job->sync, resv,
1989 AMDGPU_SYNC_ALWAYS,
1990 AMDGPU_FENCE_OWNER_UNDEFINED);
1991 if (r) {
1992 DRM_ERROR("sync failed (%d).\n", r);
1993 goto error_free;
1994 }
1995 }
1996
1997 amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
1998 while (cursor.remaining) {
1999 uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
2000 uint64_t dst_addr = cursor.start;
2001
2002 dst_addr += amdgpu_ttm_domain_start(adev,
2003 bo->tbo.resource->mem_type);
2004 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
2005 cur_size);
2006
2007 amdgpu_res_next(&cursor, cur_size);
2008 }
2009
2010 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2011 WARN_ON(job->ibs[0].length_dw > num_dw);
2012 r = amdgpu_job_submit(job, &adev->mman.entity,
2013 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2014 if (r)
2015 goto error_free;
2016
2017 return 0;
2018
2019error_free:
2020 amdgpu_job_free(job);
2021 return r;
2022}
2023
2024#if defined(CONFIG_DEBUG_FS)
2025
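/*
 * Dump the state of the individual TTM resource managers (VRAM, GTT, GDS,
 * GWS, OA) and of the TTM page pool through debugfs.
 */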
2026static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
2027{
2028 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2029 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2030 TTM_PL_VRAM);
2031 struct drm_printer p = drm_seq_file_printer(m);
2032
2033 man->func->debug(man, &p);
2034 return 0;
2035}
2036
2037static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
2038{
2039 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2040
2041 return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
2042}
2043
2044static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused)
2045{
2046 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2047 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2048 TTM_PL_TT);
2049 struct drm_printer p = drm_seq_file_printer(m);
2050
2051 man->func->debug(man, &p);
2052 return 0;
2053}
2054
2055static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused)
2056{
2057 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2058 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2059 AMDGPU_PL_GDS);
2060 struct drm_printer p = drm_seq_file_printer(m);
2061
2062 man->func->debug(man, &p);
2063 return 0;
2064}
2065
2066static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused)
2067{
2068 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2069 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2070 AMDGPU_PL_GWS);
2071 struct drm_printer p = drm_seq_file_printer(m);
2072
2073 man->func->debug(man, &p);
2074 return 0;
2075}
2076
2077static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused)
2078{
2079 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2080 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2081 AMDGPU_PL_OA);
2082 struct drm_printer p = drm_seq_file_printer(m);
2083
2084 man->func->debug(man, &p);
2085 return 0;
2086}
2087
2088DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table);
2089DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table);
2090DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table);
2091DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table);
2092DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table);
2093DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
2094
2095/*
2096 * amdgpu_ttm_vram_read - Linear read access to VRAM
2097 *
2098 * Accesses VRAM via MMIO for debugging purposes.
2099 */
2100static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2101 size_t size, loff_t *pos)
2102{
2103 struct amdgpu_device *adev = file_inode(f)->i_private;
2104 ssize_t result = 0;
2105
2106 if (size & 0x3 || *pos & 0x3)
2107 return -EINVAL;
2108
2109 if (*pos >= adev->gmc.mc_vram_size)
2110 return -ENXIO;
2111
2112 size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2113 while (size) {
2114 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2115 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2116
2117 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2118 if (copy_to_user(buf, value, bytes))
2119 return -EFAULT;
2120
2121 result += bytes;
2122 buf += bytes;
2123 *pos += bytes;
2124 size -= bytes;
2125 }
2126
2127 return result;
2128}
2129
2130/*
2131 * amdgpu_ttm_vram_write - Linear write access to VRAM
2132 *
2133 * Accesses VRAM via MMIO for debugging purposes.
2134 */
2135static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2136 size_t size, loff_t *pos)
2137{
2138 struct amdgpu_device *adev = file_inode(f)->i_private;
2139 ssize_t result = 0;
2140 int r;
2141
2142 if (size & 0x3 || *pos & 0x3)
2143 return -EINVAL;
2144
2145 if (*pos >= adev->gmc.mc_vram_size)
2146 return -ENXIO;
2147
2148 while (size) {
2149 unsigned long flags;
2150 uint32_t value;
2151
2152 if (*pos >= adev->gmc.mc_vram_size)
2153 return result;
2154
2155 r = get_user(value, (uint32_t *)buf);
2156 if (r)
2157 return r;
2158
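		/* write one dword at a time through the MM_INDEX/MM_DATA window */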
2159 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
2160 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
2161 WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
2162 WREG32_NO_KIQ(mmMM_DATA, value);
2163 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
2164
2165 result += 4;
2166 buf += 4;
2167 *pos += 4;
2168 size -= 4;
2169 }
2170
2171 return result;
2172}
2173
2174static const struct file_operations amdgpu_ttm_vram_fops = {
2175 .owner = THIS_MODULE,
2176 .read = amdgpu_ttm_vram_read,
2177 .write = amdgpu_ttm_vram_write,
2178 .llseek = default_llseek,
2179};
2180
2181/*
2182 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2183 *
2184 * This function is used to read memory that has been mapped to the
2185 * GPU; the addresses are not physical addresses but bus addresses
2186 * (e.g., what you would put in an IB or ring buffer).
2187 */
2188static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2189 size_t size, loff_t *pos)
2190{
2191 struct amdgpu_device *adev = file_inode(f)->i_private;
2192 struct iommu_domain *dom;
2193 ssize_t result = 0;
2194 int r;
2195
2196 /* retrieve the IOMMU domain if any for this device */
2197 dom = iommu_get_domain_for_dev(adev->dev);
2198
2199 while (size) {
2200 phys_addr_t addr = *pos & PAGE_MASK;
2201 loff_t off = *pos & ~PAGE_MASK;
2202 size_t bytes = PAGE_SIZE - off;
2203 unsigned long pfn;
2204 struct page *p;
2205 void *ptr;
2206
2207 bytes = bytes < size ? bytes : size;
2208
2209 /* Translate the bus address to a physical address. If
2210 * the domain is NULL it means there is no IOMMU active
2211 * and the address translation is the identity
2212 */
2213 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2214
2215 pfn = addr >> PAGE_SHIFT;
2216 if (!pfn_valid(pfn))
2217 return -EPERM;
2218
2219 p = pfn_to_page(pfn);
2220 if (p->mapping != adev->mman.bdev.dev_mapping)
2221 return -EPERM;
2222
2223 ptr = kmap(p);
2224 r = copy_to_user(buf, ptr + off, bytes);
2225 kunmap(p);
2226 if (r)
2227 return -EFAULT;
2228
2229 size -= bytes;
2230 *pos += bytes;
2231 result += bytes;
2232 }
2233
2234 return result;
2235}
2236
2237/*
2238 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2239 *
2240 * This function is used to write memory that has been mapped to the
2241 * GPU; the addresses are not physical addresses but bus addresses
2242 * (e.g., what you would put in an IB or ring buffer).
2243 */
2244static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2245 size_t size, loff_t *pos)
2246{
2247 struct amdgpu_device *adev = file_inode(f)->i_private;
2248 struct iommu_domain *dom;
2249 ssize_t result = 0;
2250 int r;
2251
2252 dom = iommu_get_domain_for_dev(adev->dev);
2253
2254 while (size) {
2255 phys_addr_t addr = *pos & PAGE_MASK;
2256 loff_t off = *pos & ~PAGE_MASK;
2257 size_t bytes = PAGE_SIZE - off;
2258 unsigned long pfn;
2259 struct page *p;
2260 void *ptr;
2261
2262 bytes = bytes < size ? bytes : size;
2263
2264 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2265
2266 pfn = addr >> PAGE_SHIFT;
2267 if (!pfn_valid(pfn))
2268 return -EPERM;
2269
2270 p = pfn_to_page(pfn);
2271 if (p->mapping != adev->mman.bdev.dev_mapping)
2272 return -EPERM;
2273
2274 ptr = kmap(p);
2275 r = copy_from_user(ptr + off, buf, bytes);
2276 kunmap(p);
2277 if (r)
2278 return -EFAULT;
2279
2280 size -= bytes;
2281 *pos += bytes;
2282 result += bytes;
2283 }
2284
2285 return result;
2286}
2287
2288static const struct file_operations amdgpu_ttm_iomem_fops = {
2289 .owner = THIS_MODULE,
2290 .read = amdgpu_iomem_read,
2291 .write = amdgpu_iomem_write,
2292 .llseek = default_llseek
2293};
2294
2295#endif
2296
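/**
 * amdgpu_ttm_debugfs_init - register the TTM debugfs entries
 *
 * @adev: amdgpu_device pointer
 *
 * Create the debugfs files for raw VRAM and IOMMU mapped memory access as
 * well as one file per resource manager and the TTM page pool under the DRM
 * primary minor's debugfs root. Does nothing when debugfs is disabled.
 */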
2297void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2298{
2299#if defined(CONFIG_DEBUG_FS)
2300 struct drm_minor *minor = adev_to_drm(adev)->primary;
2301 struct dentry *root = minor->debugfs_root;
2302
2303 debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
2304 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
2305 debugfs_create_file("amdgpu_iomem", 0444, root, adev,
2306 &amdgpu_ttm_iomem_fops);
2307 debugfs_create_file("amdgpu_vram_mm", 0444, root, adev,
2308 &amdgpu_mm_vram_table_fops);
2309 debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev,
2310 &amdgpu_mm_tt_table_fops);
2311 debugfs_create_file("amdgpu_gds_mm", 0444, root, adev,
2312 &amdgpu_mm_gds_table_fops);
2313 debugfs_create_file("amdgpu_gws_mm", 0444, root, adev,
2314 &amdgpu_mm_gws_table_fops);
2315 debugfs_create_file("amdgpu_oa_mm", 0444, root, adev,
2316 &amdgpu_mm_oa_table_fops);
2317 debugfs_create_file("ttm_page_pool", 0444, root, adev,
2318 &amdgpu_ttm_page_pool_fops);
2319#endif
2320}
32#include <drm/ttm/ttm_bo_api.h>
33#include <drm/ttm/ttm_bo_driver.h>
34#include <drm/ttm/ttm_placement.h>
35#include <drm/ttm/ttm_module.h>
36#include <drm/ttm/ttm_page_alloc.h>
37#include <drm/drmP.h>
38#include <drm/amdgpu_drm.h>
39#include <linux/seq_file.h>
40#include <linux/slab.h>
41#include <linux/swiotlb.h>
42#include <linux/swap.h>
43#include <linux/pagemap.h>
44#include <linux/debugfs.h>
45#include <linux/iommu.h>
46#include "amdgpu.h"
47#include "amdgpu_object.h"
48#include "amdgpu_trace.h"
49#include "amdgpu_amdkfd.h"
50#include "bif/bif_4_1_d.h"
51
52#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
53
54static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
55 struct ttm_mem_reg *mem, unsigned num_pages,
56 uint64_t offset, unsigned window,
57 struct amdgpu_ring *ring,
58 uint64_t *addr);
59
60static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
61static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
62
63/*
64 * Global memory.
65 */
66static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
67{
68 return ttm_mem_global_init(ref->object);
69}
70
71static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
72{
73 ttm_mem_global_release(ref->object);
74}
75
76static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
77{
78 struct drm_global_reference *global_ref;
79 struct amdgpu_ring *ring;
80 struct drm_sched_rq *rq;
81 int r;
82
83 adev->mman.mem_global_referenced = false;
84 global_ref = &adev->mman.mem_global_ref;
85 global_ref->global_type = DRM_GLOBAL_TTM_MEM;
86 global_ref->size = sizeof(struct ttm_mem_global);
87 global_ref->init = &amdgpu_ttm_mem_global_init;
88 global_ref->release = &amdgpu_ttm_mem_global_release;
89 r = drm_global_item_ref(global_ref);
90 if (r) {
91 DRM_ERROR("Failed setting up TTM memory accounting "
92 "subsystem.\n");
93 goto error_mem;
94 }
95
96 adev->mman.bo_global_ref.mem_glob =
97 adev->mman.mem_global_ref.object;
98 global_ref = &adev->mman.bo_global_ref.ref;
99 global_ref->global_type = DRM_GLOBAL_TTM_BO;
100 global_ref->size = sizeof(struct ttm_bo_global);
101 global_ref->init = &ttm_bo_global_init;
102 global_ref->release = &ttm_bo_global_release;
103 r = drm_global_item_ref(global_ref);
104 if (r) {
105 DRM_ERROR("Failed setting up TTM BO subsystem.\n");
106 goto error_bo;
107 }
108
109 mutex_init(&adev->mman.gtt_window_lock);
110
111 ring = adev->mman.buffer_funcs_ring;
112 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
113 r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
114 rq, amdgpu_sched_jobs, NULL);
115 if (r) {
116 DRM_ERROR("Failed setting up TTM BO move run queue.\n");
117 goto error_entity;
118 }
119
120 adev->mman.mem_global_referenced = true;
121
122 return 0;
123
124error_entity:
125 drm_global_item_unref(&adev->mman.bo_global_ref.ref);
126error_bo:
127 drm_global_item_unref(&adev->mman.mem_global_ref);
128error_mem:
129 return r;
130}
131
132static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
133{
134 if (adev->mman.mem_global_referenced) {
135 drm_sched_entity_fini(adev->mman.entity.sched,
136 &adev->mman.entity);
137 mutex_destroy(&adev->mman.gtt_window_lock);
138 drm_global_item_unref(&adev->mman.bo_global_ref.ref);
139 drm_global_item_unref(&adev->mman.mem_global_ref);
140 adev->mman.mem_global_referenced = false;
141 }
142}
143
144static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
145{
146 return 0;
147}
148
149static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
150 struct ttm_mem_type_manager *man)
151{
152 struct amdgpu_device *adev;
153
154 adev = amdgpu_ttm_adev(bdev);
155
156 switch (type) {
157 case TTM_PL_SYSTEM:
158 /* System memory */
159 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
160 man->available_caching = TTM_PL_MASK_CACHING;
161 man->default_caching = TTM_PL_FLAG_CACHED;
162 break;
163 case TTM_PL_TT:
164 man->func = &amdgpu_gtt_mgr_func;
165 man->gpu_offset = adev->gmc.gart_start;
166 man->available_caching = TTM_PL_MASK_CACHING;
167 man->default_caching = TTM_PL_FLAG_CACHED;
168 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
169 break;
170 case TTM_PL_VRAM:
171 /* "On-card" video ram */
172 man->func = &amdgpu_vram_mgr_func;
173 man->gpu_offset = adev->gmc.vram_start;
174 man->flags = TTM_MEMTYPE_FLAG_FIXED |
175 TTM_MEMTYPE_FLAG_MAPPABLE;
176 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
177 man->default_caching = TTM_PL_FLAG_WC;
178 break;
179 case AMDGPU_PL_GDS:
180 case AMDGPU_PL_GWS:
181 case AMDGPU_PL_OA:
182 /* On-chip GDS memory*/
183 man->func = &ttm_bo_manager_func;
184 man->gpu_offset = 0;
185 man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
186 man->available_caching = TTM_PL_FLAG_UNCACHED;
187 man->default_caching = TTM_PL_FLAG_UNCACHED;
188 break;
189 default:
190 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
191 return -EINVAL;
192 }
193 return 0;
194}
195
196static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
197 struct ttm_placement *placement)
198{
199 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
200 struct amdgpu_bo *abo;
201 static const struct ttm_place placements = {
202 .fpfn = 0,
203 .lpfn = 0,
204 .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
205 };
206
207 if (bo->type == ttm_bo_type_sg) {
208 placement->num_placement = 0;
209 placement->num_busy_placement = 0;
210 return;
211 }
212
213 if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
214 placement->placement = &placements;
215 placement->busy_placement = &placements;
216 placement->num_placement = 1;
217 placement->num_busy_placement = 1;
218 return;
219 }
220 abo = ttm_to_amdgpu_bo(bo);
221 switch (bo->mem.mem_type) {
222 case TTM_PL_VRAM:
223 if (!adev->mman.buffer_funcs_enabled) {
224 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
225 } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
226 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
227 unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
228 struct drm_mm_node *node = bo->mem.mm_node;
229 unsigned long pages_left;
230
231 for (pages_left = bo->mem.num_pages;
232 pages_left;
233 pages_left -= node->size, node++) {
234 if (node->start < fpfn)
235 break;
236 }
237
238 if (!pages_left)
239 goto gtt;
240
241 /* Try evicting to the CPU inaccessible part of VRAM
242 * first, but only set GTT as busy placement, so this
243 * BO will be evicted to GTT rather than causing other
244 * BOs to be evicted from VRAM
245 */
246 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
247 AMDGPU_GEM_DOMAIN_GTT);
248 abo->placements[0].fpfn = fpfn;
249 abo->placements[0].lpfn = 0;
250 abo->placement.busy_placement = &abo->placements[1];
251 abo->placement.num_busy_placement = 1;
252 } else {
253gtt:
254 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
255 }
256 break;
257 case TTM_PL_TT:
258 default:
259 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
260 }
261 *placement = abo->placement;
262}
263
264static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
265{
266 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
267
268 /*
269 * Don't verify access for KFD BOs. They don't have a GEM
270 * object associated with them.
271 */
272 if (abo->kfd_bo)
273 return 0;
274
275 if (amdgpu_ttm_tt_get_usermm(bo->ttm))
276 return -EPERM;
277 return drm_vma_node_verify_access(&abo->gem_base.vma_node,
278 filp->private_data);
279}
280
281static void amdgpu_move_null(struct ttm_buffer_object *bo,
282 struct ttm_mem_reg *new_mem)
283{
284 struct ttm_mem_reg *old_mem = &bo->mem;
285
286 BUG_ON(old_mem->mm_node != NULL);
287 *old_mem = *new_mem;
288 new_mem->mm_node = NULL;
289}
290
291static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
292 struct drm_mm_node *mm_node,
293 struct ttm_mem_reg *mem)
294{
295 uint64_t addr = 0;
296
297 if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
298 addr = mm_node->start << PAGE_SHIFT;
299 addr += bo->bdev->man[mem->mem_type].gpu_offset;
300 }
301 return addr;
302}
303
304/**
305 * amdgpu_find_mm_node - Helper function finds the drm_mm_node
306 * corresponding to @offset. It also modifies the offset to be
307 * within the drm_mm_node returned
308 */
309static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
310 unsigned long *offset)
311{
312 struct drm_mm_node *mm_node = mem->mm_node;
313
314 while (*offset >= (mm_node->size << PAGE_SHIFT)) {
315 *offset -= (mm_node->size << PAGE_SHIFT);
316 ++mm_node;
317 }
318 return mm_node;
319}
320
321/**
322 * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
323 *
324 * The function copies @size bytes from {src->mem + src->offset} to
325 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
326 * move and different for a BO to BO copy.
327 *
328 * @f: Returns the last fence if multiple jobs are submitted.
329 */
330int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
331 struct amdgpu_copy_mem *src,
332 struct amdgpu_copy_mem *dst,
333 uint64_t size,
334 struct reservation_object *resv,
335 struct dma_fence **f)
336{
337 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
338 struct drm_mm_node *src_mm, *dst_mm;
339 uint64_t src_node_start, dst_node_start, src_node_size,
340 dst_node_size, src_page_offset, dst_page_offset;
341 struct dma_fence *fence = NULL;
342 int r = 0;
343 const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
344 AMDGPU_GPU_PAGE_SIZE);
345
346 if (!adev->mman.buffer_funcs_enabled) {
347 DRM_ERROR("Trying to move memory with ring turned off.\n");
348 return -EINVAL;
349 }
350
351 src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
352 src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
353 src->offset;
354 src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
355 src_page_offset = src_node_start & (PAGE_SIZE - 1);
356
357 dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
358 dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
359 dst->offset;
360 dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
361 dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
362
363 mutex_lock(&adev->mman.gtt_window_lock);
364
365 while (size) {
366 unsigned long cur_size;
367 uint64_t from = src_node_start, to = dst_node_start;
368 struct dma_fence *next;
369
370 /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
371 * begins at an offset, then adjust the size accordingly
372 */
373 cur_size = min3(min(src_node_size, dst_node_size), size,
374 GTT_MAX_BYTES);
375 if (cur_size + src_page_offset > GTT_MAX_BYTES ||
376 cur_size + dst_page_offset > GTT_MAX_BYTES)
377 cur_size -= max(src_page_offset, dst_page_offset);
378
379 /* Map only what needs to be accessed. Map src to window 0 and
380 * dst to window 1
381 */
382 if (src->mem->mem_type == TTM_PL_TT &&
383 !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
384 r = amdgpu_map_buffer(src->bo, src->mem,
385 PFN_UP(cur_size + src_page_offset),
386 src_node_start, 0, ring,
387 &from);
388 if (r)
389 goto error;
390 /* Adjust the offset because amdgpu_map_buffer returns
391 * start of mapped page
392 */
393 from += src_page_offset;
394 }
395
396 if (dst->mem->mem_type == TTM_PL_TT &&
397 !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
398 r = amdgpu_map_buffer(dst->bo, dst->mem,
399 PFN_UP(cur_size + dst_page_offset),
400 dst_node_start, 1, ring,
401 &to);
402 if (r)
403 goto error;
404 to += dst_page_offset;
405 }
406
407 r = amdgpu_copy_buffer(ring, from, to, cur_size,
408 resv, &next, false, true);
409 if (r)
410 goto error;
411
412 dma_fence_put(fence);
413 fence = next;
414
415 size -= cur_size;
416 if (!size)
417 break;
418
419 src_node_size -= cur_size;
420 if (!src_node_size) {
421 src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
422 src->mem);
423 src_node_size = (src_mm->size << PAGE_SHIFT);
424 } else {
425 src_node_start += cur_size;
426 src_page_offset = src_node_start & (PAGE_SIZE - 1);
427 }
428 dst_node_size -= cur_size;
429 if (!dst_node_size) {
430 dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
431 dst->mem);
432 dst_node_size = (dst_mm->size << PAGE_SHIFT);
433 } else {
434 dst_node_start += cur_size;
435 dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
436 }
437 }
438error:
439 mutex_unlock(&adev->mman.gtt_window_lock);
440 if (f)
441 *f = dma_fence_get(fence);
442 dma_fence_put(fence);
443 return r;
444}
445
446
447static int amdgpu_move_blit(struct ttm_buffer_object *bo,
448 bool evict, bool no_wait_gpu,
449 struct ttm_mem_reg *new_mem,
450 struct ttm_mem_reg *old_mem)
451{
452 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
453 struct amdgpu_copy_mem src, dst;
454 struct dma_fence *fence = NULL;
455 int r;
456
457 src.bo = bo;
458 dst.bo = bo;
459 src.mem = old_mem;
460 dst.mem = new_mem;
461 src.offset = 0;
462 dst.offset = 0;
463
464 r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
465 new_mem->num_pages << PAGE_SHIFT,
466 bo->resv, &fence);
467 if (r)
468 goto error;
469
470 r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
471 dma_fence_put(fence);
472 return r;
473
474error:
475 if (fence)
476 dma_fence_wait(fence, false);
477 dma_fence_put(fence);
478 return r;
479}
480
481static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
482 struct ttm_operation_ctx *ctx,
483 struct ttm_mem_reg *new_mem)
484{
485 struct amdgpu_device *adev;
486 struct ttm_mem_reg *old_mem = &bo->mem;
487 struct ttm_mem_reg tmp_mem;
488 struct ttm_place placements;
489 struct ttm_placement placement;
490 int r;
491
492 adev = amdgpu_ttm_adev(bo->bdev);
493 tmp_mem = *new_mem;
494 tmp_mem.mm_node = NULL;
495 placement.num_placement = 1;
496 placement.placement = &placements;
497 placement.num_busy_placement = 1;
498 placement.busy_placement = &placements;
499 placements.fpfn = 0;
500 placements.lpfn = 0;
501 placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
502 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
503 if (unlikely(r)) {
504 return r;
505 }
506
507 r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
508 if (unlikely(r)) {
509 goto out_cleanup;
510 }
511
512 r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
513 if (unlikely(r)) {
514 goto out_cleanup;
515 }
516 r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
517 if (unlikely(r)) {
518 goto out_cleanup;
519 }
520 r = ttm_bo_move_ttm(bo, ctx, new_mem);
521out_cleanup:
522 ttm_bo_mem_put(bo, &tmp_mem);
523 return r;
524}
525
526static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
527 struct ttm_operation_ctx *ctx,
528 struct ttm_mem_reg *new_mem)
529{
530 struct amdgpu_device *adev;
531 struct ttm_mem_reg *old_mem = &bo->mem;
532 struct ttm_mem_reg tmp_mem;
533 struct ttm_placement placement;
534 struct ttm_place placements;
535 int r;
536
537 adev = amdgpu_ttm_adev(bo->bdev);
538 tmp_mem = *new_mem;
539 tmp_mem.mm_node = NULL;
540 placement.num_placement = 1;
541 placement.placement = &placements;
542 placement.num_busy_placement = 1;
543 placement.busy_placement = &placements;
544 placements.fpfn = 0;
545 placements.lpfn = 0;
546 placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
547 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
548 if (unlikely(r)) {
549 return r;
550 }
551 r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
552 if (unlikely(r)) {
553 goto out_cleanup;
554 }
555 r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
556 if (unlikely(r)) {
557 goto out_cleanup;
558 }
559out_cleanup:
560 ttm_bo_mem_put(bo, &tmp_mem);
561 return r;
562}
563
564static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
565 struct ttm_operation_ctx *ctx,
566 struct ttm_mem_reg *new_mem)
567{
568 struct amdgpu_device *adev;
569 struct amdgpu_bo *abo;
570 struct ttm_mem_reg *old_mem = &bo->mem;
571 int r;
572
573 /* Can't move a pinned BO */
574 abo = ttm_to_amdgpu_bo(bo);
575 if (WARN_ON_ONCE(abo->pin_count > 0))
576 return -EINVAL;
577
578 adev = amdgpu_ttm_adev(bo->bdev);
579
580 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
581 amdgpu_move_null(bo, new_mem);
582 return 0;
583 }
584 if ((old_mem->mem_type == TTM_PL_TT &&
585 new_mem->mem_type == TTM_PL_SYSTEM) ||
586 (old_mem->mem_type == TTM_PL_SYSTEM &&
587 new_mem->mem_type == TTM_PL_TT)) {
588 /* bind is enough */
589 amdgpu_move_null(bo, new_mem);
590 return 0;
591 }
592
593 if (!adev->mman.buffer_funcs_enabled)
594 goto memcpy;
595
596 if (old_mem->mem_type == TTM_PL_VRAM &&
597 new_mem->mem_type == TTM_PL_SYSTEM) {
598 r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
599 } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
600 new_mem->mem_type == TTM_PL_VRAM) {
601 r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
602 } else {
603 r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
604 new_mem, old_mem);
605 }
606
607 if (r) {
608memcpy:
609 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
610 if (r) {
611 return r;
612 }
613 }
614
615 if (bo->type == ttm_bo_type_device &&
616 new_mem->mem_type == TTM_PL_VRAM &&
617 old_mem->mem_type != TTM_PL_VRAM) {
618 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
619 * accesses the BO after it's moved.
620 */
621 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
622 }
623
624 /* update statistics */
625 atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
626 return 0;
627}
628
629static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
630{
631 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
632 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
633 struct drm_mm_node *mm_node = mem->mm_node;
634
635 mem->bus.addr = NULL;
636 mem->bus.offset = 0;
637 mem->bus.size = mem->num_pages << PAGE_SHIFT;
638 mem->bus.base = 0;
639 mem->bus.is_iomem = false;
640 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
641 return -EINVAL;
642 switch (mem->mem_type) {
643 case TTM_PL_SYSTEM:
644 /* system memory */
645 return 0;
646 case TTM_PL_TT:
647 break;
648 case TTM_PL_VRAM:
649 mem->bus.offset = mem->start << PAGE_SHIFT;
650 /* check if it's visible */
651 if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
652 return -EINVAL;
653 /* Only physically contiguous buffers apply. In a contiguous
654 * buffer, size of the first mm_node would match the number of
655 * pages in ttm_mem_reg.
656 */
657 if (adev->mman.aper_base_kaddr &&
658 (mm_node->size == mem->num_pages))
659 mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
660 mem->bus.offset;
661
662 mem->bus.base = adev->gmc.aper_base;
663 mem->bus.is_iomem = true;
664 break;
665 default:
666 return -EINVAL;
667 }
668 return 0;
669}
670
671static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
672{
673}
674
675static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
676 unsigned long page_offset)
677{
678 struct drm_mm_node *mm;
679 unsigned long offset = (page_offset << PAGE_SHIFT);
680
681 mm = amdgpu_find_mm_node(&bo->mem, &offset);
682 return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
683 (offset >> PAGE_SHIFT);
684}
685
686/*
687 * TTM backend functions.
688 */
689struct amdgpu_ttm_gup_task_list {
690 struct list_head list;
691 struct task_struct *task;
692};
693
694struct amdgpu_ttm_tt {
695 struct ttm_dma_tt ttm;
696 u64 offset;
697 uint64_t userptr;
698 struct mm_struct *usermm;
699 uint32_t userflags;
700 spinlock_t guptasklock;
701 struct list_head guptasks;
702 atomic_t mmu_invalidations;
703 uint32_t last_set_pages;
704};
705
706int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
707{
708 struct amdgpu_ttm_tt *gtt = (void *)ttm;
709 unsigned int flags = 0;
710 unsigned pinned = 0;
711 int r;
712
713 if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
714 flags |= FOLL_WRITE;
715
716 down_read(¤t->mm->mmap_sem);
717
718 if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
719 /* check that we only use anonymous memory
720 to prevent problems with writeback */
721 unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
722 struct vm_area_struct *vma;
723
724 vma = find_vma(gtt->usermm, gtt->userptr);
725 if (!vma || vma->vm_file || vma->vm_end < end) {
726 up_read(¤t->mm->mmap_sem);
727 return -EPERM;
728 }
729 }
730
731 do {
732 unsigned num_pages = ttm->num_pages - pinned;
733 uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
734 struct page **p = pages + pinned;
735 struct amdgpu_ttm_gup_task_list guptask;
736
737 guptask.task = current;
738 spin_lock(>t->guptasklock);
739 list_add(&guptask.list, >t->guptasks);
740 spin_unlock(>t->guptasklock);
741
742 r = get_user_pages(userptr, num_pages, flags, p, NULL);
743
744 spin_lock(>t->guptasklock);
745 list_del(&guptask.list);
746 spin_unlock(>t->guptasklock);
747
748 if (r < 0)
749 goto release_pages;
750
751 pinned += r;
752
753 } while (pinned < ttm->num_pages);
754
755 up_read(¤t->mm->mmap_sem);
756 return 0;
757
758release_pages:
759 release_pages(pages, pinned);
760 up_read(¤t->mm->mmap_sem);
761 return r;
762}
763
764void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
765{
766 struct amdgpu_ttm_tt *gtt = (void *)ttm;
767 unsigned i;
768
769 gtt->last_set_pages = atomic_read(>t->mmu_invalidations);
770 for (i = 0; i < ttm->num_pages; ++i) {
771 if (ttm->pages[i])
772 put_page(ttm->pages[i]);
773
774 ttm->pages[i] = pages ? pages[i] : NULL;
775 }
776}
777
778void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
779{
780 struct amdgpu_ttm_tt *gtt = (void *)ttm;
781 unsigned i;
782
783 for (i = 0; i < ttm->num_pages; ++i) {
784 struct page *page = ttm->pages[i];
785
786 if (!page)
787 continue;
788
789 if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
790 set_page_dirty(page);
791
792 mark_page_accessed(page);
793 }
794}
795
796/* prepare the sg table with the user pages */
797static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
798{
799 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
800 struct amdgpu_ttm_tt *gtt = (void *)ttm;
801 unsigned nents;
802 int r;
803
804 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
805 enum dma_data_direction direction = write ?
806 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
807
808 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
809 ttm->num_pages << PAGE_SHIFT,
810 GFP_KERNEL);
811 if (r)
812 goto release_sg;
813
814 r = -ENOMEM;
815 nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
816 if (nents != ttm->sg->nents)
817 goto release_sg;
818
819 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
820 gtt->ttm.dma_address, ttm->num_pages);
821
822 return 0;
823
824release_sg:
825 kfree(ttm->sg);
826 return r;
827}
828
829static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
830{
831 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
832 struct amdgpu_ttm_tt *gtt = (void *)ttm;
833
834 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
835 enum dma_data_direction direction = write ?
836 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
837
838 /* double check that we don't free the table twice */
839 if (!ttm->sg->sgl)
840 return;
841
842 /* free the sg table and pages again */
843 dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
844
845 amdgpu_ttm_tt_mark_user_pages(ttm);
846
847 sg_free_table(ttm->sg);
848}
849
850static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
851 struct ttm_mem_reg *bo_mem)
852{
853 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
854 struct amdgpu_ttm_tt *gtt = (void*)ttm;
855 uint64_t flags;
856 int r = 0;
857
858 if (gtt->userptr) {
859 r = amdgpu_ttm_tt_pin_userptr(ttm);
860 if (r) {
861 DRM_ERROR("failed to pin userptr\n");
862 return r;
863 }
864 }
865 if (!ttm->num_pages) {
866 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
867 ttm->num_pages, bo_mem, ttm);
868 }
869
870 if (bo_mem->mem_type == AMDGPU_PL_GDS ||
871 bo_mem->mem_type == AMDGPU_PL_GWS ||
872 bo_mem->mem_type == AMDGPU_PL_OA)
873 return -EINVAL;
874
875 if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
876 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
877 return 0;
878 }
879
880 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
881 gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
882 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
883 ttm->pages, gtt->ttm.dma_address, flags);
884
885 if (r)
886 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
887 ttm->num_pages, gtt->offset);
888 return r;
889}
890
891int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
892{
893 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
894 struct ttm_operation_ctx ctx = { false, false };
895 struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
896 struct ttm_mem_reg tmp;
897 struct ttm_placement placement;
898 struct ttm_place placements;
899 uint64_t flags;
900 int r;
901
902 if (bo->mem.mem_type != TTM_PL_TT ||
903 amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
904 return 0;
905
906 tmp = bo->mem;
907 tmp.mm_node = NULL;
908 placement.num_placement = 1;
909 placement.placement = &placements;
910 placement.num_busy_placement = 1;
911 placement.busy_placement = &placements;
912 placements.fpfn = 0;
913 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
914 placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
915 TTM_PL_FLAG_TT;
916
917 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
918 if (unlikely(r))
919 return r;
920
921 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
922 gtt->offset = (u64)tmp.start << PAGE_SHIFT;
923 r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
924 bo->ttm->pages, gtt->ttm.dma_address, flags);
925 if (unlikely(r)) {
926 ttm_bo_mem_put(bo, &tmp);
927 return r;
928 }
929
930 ttm_bo_mem_put(bo, &bo->mem);
931 bo->mem = tmp;
932 bo->offset = (bo->mem.start << PAGE_SHIFT) +
933 bo->bdev->man[bo->mem.mem_type].gpu_offset;
934
935 return 0;
936}
937
938int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
939{
940 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
941 struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
942 uint64_t flags;
943 int r;
944
945 if (!gtt)
946 return 0;
947
948 flags = amdgpu_ttm_tt_pte_flags(adev, >t->ttm.ttm, &tbo->mem);
949 r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
950 gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
951 if (r)
952 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
953 gtt->ttm.ttm.num_pages, gtt->offset);
954 return r;
955}
956
957static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
958{
959 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
960 struct amdgpu_ttm_tt *gtt = (void *)ttm;
961 int r;
962
963 if (gtt->userptr)
964 amdgpu_ttm_tt_unpin_userptr(ttm);
965
966 if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
967 return 0;
968
969 /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
970 r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
971 if (r)
972 DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
973 gtt->ttm.ttm.num_pages, gtt->offset);
974 return r;
975}
976
977static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
978{
979 struct amdgpu_ttm_tt *gtt = (void *)ttm;
980
981 ttm_dma_tt_fini(>t->ttm);
982 kfree(gtt);
983}
984
985static struct ttm_backend_func amdgpu_backend_func = {
986 .bind = &amdgpu_ttm_backend_bind,
987 .unbind = &amdgpu_ttm_backend_unbind,
988 .destroy = &amdgpu_ttm_backend_destroy,
989};
990
991static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
992 uint32_t page_flags)
993{
994 struct amdgpu_device *adev;
995 struct amdgpu_ttm_tt *gtt;
996
997 adev = amdgpu_ttm_adev(bo->bdev);
998
999 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1000 if (gtt == NULL) {
1001 return NULL;
1002 }
1003 gtt->ttm.ttm.func = &amdgpu_backend_func;
1004 if (ttm_sg_tt_init(>t->ttm, bo, page_flags)) {
1005 kfree(gtt);
1006 return NULL;
1007 }
1008 return >t->ttm.ttm;
1009}
1010
1011static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
1012 struct ttm_operation_ctx *ctx)
1013{
1014 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1015 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1016 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1017
1018 if (gtt && gtt->userptr) {
1019 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1020 if (!ttm->sg)
1021 return -ENOMEM;
1022
1023 ttm->page_flags |= TTM_PAGE_FLAG_SG;
1024 ttm->state = tt_unbound;
1025 return 0;
1026 }
1027
1028 if (slave && ttm->sg) {
1029 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1030 gtt->ttm.dma_address,
1031 ttm->num_pages);
1032 ttm->state = tt_unbound;
1033 return 0;
1034 }
1035
1036#ifdef CONFIG_SWIOTLB
1037 if (adev->need_swiotlb && swiotlb_nr_tbl()) {
1038 return ttm_dma_populate(>t->ttm, adev->dev, ctx);
1039 }
1040#endif
1041
1042 return ttm_populate_and_map_pages(adev->dev, >t->ttm, ctx);
1043}
1044
1045static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
1046{
1047 struct amdgpu_device *adev;
1048 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1049 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1050
1051 if (gtt && gtt->userptr) {
1052 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1053 kfree(ttm->sg);
1054 ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
1055 return;
1056 }
1057
1058 if (slave)
1059 return;
1060
1061 adev = amdgpu_ttm_adev(ttm->bdev);
1062
1063#ifdef CONFIG_SWIOTLB
1064 if (adev->need_swiotlb && swiotlb_nr_tbl()) {
1065 ttm_dma_unpopulate(>t->ttm, adev->dev);
1066 return;
1067 }
1068#endif
1069
1070 ttm_unmap_and_unpopulate_pages(adev->dev, >t->ttm);
1071}
1072
1073int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
1074 uint32_t flags)
1075{
1076 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1077
1078 if (gtt == NULL)
1079 return -EINVAL;
1080
1081 gtt->userptr = addr;
1082 gtt->usermm = current->mm;
1083 gtt->userflags = flags;
1084 spin_lock_init(>t->guptasklock);
1085 INIT_LIST_HEAD(>t->guptasks);
1086 atomic_set(>t->mmu_invalidations, 0);
1087 gtt->last_set_pages = 0;
1088
1089 return 0;
1090}
1091
1092struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1093{
1094 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1095
1096 if (gtt == NULL)
1097 return NULL;
1098
1099 return gtt->usermm;
1100}
1101
1102bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1103 unsigned long end)
1104{
1105 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1106 struct amdgpu_ttm_gup_task_list *entry;
1107 unsigned long size;
1108
1109 if (gtt == NULL || !gtt->userptr)
1110 return false;
1111
1112 size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
1113 if (gtt->userptr > end || gtt->userptr + size <= start)
1114 return false;
1115
1116 spin_lock(>t->guptasklock);
1117 list_for_each_entry(entry, >t->guptasks, list) {
1118 if (entry->task == current) {
1119 spin_unlock(>t->guptasklock);
1120 return false;
1121 }
1122 }
1123 spin_unlock(>t->guptasklock);
1124
1125 atomic_inc(>t->mmu_invalidations);
1126
1127 return true;
1128}
1129
1130bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
1131 int *last_invalidated)
1132{
1133 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1134 int prev_invalidated = *last_invalidated;
1135
1136 *last_invalidated = atomic_read(>t->mmu_invalidations);
1137 return prev_invalidated != *last_invalidated;
1138}
1139
1140bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
1141{
1142 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1143
1144 if (gtt == NULL || !gtt->userptr)
1145 return false;
1146
1147 return atomic_read(>t->mmu_invalidations) != gtt->last_set_pages;
1148}
1149
1150bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1151{
1152 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1153
1154 if (gtt == NULL)
1155 return false;
1156
1157 return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1158}
1159
1160uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1161 struct ttm_mem_reg *mem)
1162{
1163 uint64_t flags = 0;
1164
1165 if (mem && mem->mem_type != TTM_PL_SYSTEM)
1166 flags |= AMDGPU_PTE_VALID;
1167
1168 if (mem && mem->mem_type == TTM_PL_TT) {
1169 flags |= AMDGPU_PTE_SYSTEM;
1170
1171 if (ttm->caching_state == tt_cached)
1172 flags |= AMDGPU_PTE_SNOOPED;
1173 }
1174
1175 flags |= adev->gart.gart_pte_flags;
1176 flags |= AMDGPU_PTE_READABLE;
1177
1178 if (!amdgpu_ttm_tt_is_readonly(ttm))
1179 flags |= AMDGPU_PTE_WRITEABLE;
1180
1181 return flags;
1182}
1183
1184static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1185 const struct ttm_place *place)
1186{
1187 unsigned long num_pages = bo->mem.num_pages;
1188 struct drm_mm_node *node = bo->mem.mm_node;
1189 struct reservation_object_list *flist;
1190 struct dma_fence *f;
1191 int i;
1192
1193 /* If bo is a KFD BO, check if the bo belongs to the current process.
1194 * If true, then return false as any KFD process needs all its BOs to
1195 * be resident to run successfully
1196 */
1197 flist = reservation_object_get_list(bo->resv);
1198 if (flist) {
1199 for (i = 0; i < flist->shared_count; ++i) {
1200 f = rcu_dereference_protected(flist->shared[i],
1201 reservation_object_held(bo->resv));
1202 if (amdkfd_fence_check_mm(f, current->mm))
1203 return false;
1204 }
1205 }
1206
1207 switch (bo->mem.mem_type) {
1208 case TTM_PL_TT:
1209 return true;
1210
1211 case TTM_PL_VRAM:
1212 /* Check each drm MM node individually */
1213 while (num_pages) {
1214 if (place->fpfn < (node->start + node->size) &&
1215 !(place->lpfn && place->lpfn <= node->start))
1216 return true;
1217
1218 num_pages -= node->size;
1219 ++node;
1220 }
1221 return false;
1222
1223 default:
1224 break;
1225 }
1226
1227 return ttm_bo_eviction_valuable(bo, place);
1228}
1229
1230static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1231 unsigned long offset,
1232 void *buf, int len, int write)
1233{
1234 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1235 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1236 struct drm_mm_node *nodes;
1237 uint32_t value = 0;
1238 int ret = 0;
1239 uint64_t pos;
1240 unsigned long flags;
1241
1242 if (bo->mem.mem_type != TTM_PL_VRAM)
1243 return -EIO;
1244
1245 nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
1246 pos = (nodes->start << PAGE_SHIFT) + offset;
1247
1248 while (len && pos < adev->gmc.mc_vram_size) {
1249 uint64_t aligned_pos = pos & ~(uint64_t)3;
1250 uint32_t bytes = 4 - (pos & 3);
1251 uint32_t shift = (pos & 3) * 8;
1252 uint32_t mask = 0xffffffff << shift;
1253
1254 if (len < bytes) {
1255 mask &= 0xffffffff >> (bytes - len) * 8;
1256 bytes = len;
1257 }
1258
1259 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1260 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1261 WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1262 if (!write || mask != 0xffffffff)
1263 value = RREG32_NO_KIQ(mmMM_DATA);
1264 if (write) {
1265 value &= ~mask;
1266 value |= (*(uint32_t *)buf << shift) & mask;
1267 WREG32_NO_KIQ(mmMM_DATA, value);
1268 }
1269 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1270 if (!write) {
1271 value = (value & mask) >> shift;
1272 memcpy(buf, &value, bytes);
1273 }
1274
1275 ret += bytes;
1276 buf = (uint8_t *)buf + bytes;
1277 pos += bytes;
1278 len -= bytes;
1279 if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
1280 ++nodes;
1281 pos = (nodes->start << PAGE_SHIFT);
1282 }
1283 }
1284
1285 return ret;
1286}
1287
1288static struct ttm_bo_driver amdgpu_bo_driver = {
1289 .ttm_tt_create = &amdgpu_ttm_tt_create,
1290 .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1291 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1292 .invalidate_caches = &amdgpu_invalidate_caches,
1293 .init_mem_type = &amdgpu_init_mem_type,
1294 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1295 .evict_flags = &amdgpu_evict_flags,
1296 .move = &amdgpu_bo_move,
1297 .verify_access = &amdgpu_verify_access,
1298 .move_notify = &amdgpu_bo_move_notify,
1299 .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
1300 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1301 .io_mem_free = &amdgpu_ttm_io_mem_free,
1302 .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1303 .access_memory = &amdgpu_ttm_access_memory
1304};
1305
1306/*
1307 * Firmware Reservation functions
1308 */
1309/**
1310 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1311 *
1312 * @adev: amdgpu_device pointer
1313 *
1314 * free fw reserved vram if it has been reserved.
1315 */
1316static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1317{
1318 amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
1319 NULL, &adev->fw_vram_usage.va);
1320}
1321
1322/**
1323 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1324 *
1325 * @adev: amdgpu_device pointer
1326 *
1327 * create bo vram reservation from fw.
1328 */
1329static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1330{
1331 struct ttm_operation_ctx ctx = { false, false };
1332 int r = 0;
1333 int i;
1334 u64 vram_size = adev->gmc.visible_vram_size;
1335 u64 offset = adev->fw_vram_usage.start_offset;
1336 u64 size = adev->fw_vram_usage.size;
1337 struct amdgpu_bo *bo;
1338
1339 adev->fw_vram_usage.va = NULL;
1340 adev->fw_vram_usage.reserved_bo = NULL;
1341
1342 if (adev->fw_vram_usage.size > 0 &&
1343 adev->fw_vram_usage.size <= vram_size) {
1344
1345 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
1346 AMDGPU_GEM_DOMAIN_VRAM,
1347 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
1348 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
1349 ttm_bo_type_kernel, NULL,
1350 &adev->fw_vram_usage.reserved_bo);
1351 if (r)
1352 goto error_create;
1353
1354 r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
1355 if (r)
1356 goto error_reserve;
1357
1358 /* remove the original mem node and create a new one at the
1359 * request position
1360 */
1361 bo = adev->fw_vram_usage.reserved_bo;
1362 offset = ALIGN(offset, PAGE_SIZE);
1363 for (i = 0; i < bo->placement.num_placement; ++i) {
1364 bo->placements[i].fpfn = offset >> PAGE_SHIFT;
1365 bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
1366 }
1367
1368 ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
1369 r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
1370 &bo->tbo.mem, &ctx);
1371 if (r)
1372 goto error_pin;
1373
1374 r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
1375 AMDGPU_GEM_DOMAIN_VRAM,
1376 adev->fw_vram_usage.start_offset,
1377 (adev->fw_vram_usage.start_offset +
1378 adev->fw_vram_usage.size), NULL);
1379 if (r)
1380 goto error_pin;
1381 r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
1382 &adev->fw_vram_usage.va);
1383 if (r)
1384 goto error_kmap;
1385
1386 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
1387 }
1388 return r;
1389
1390error_kmap:
1391 amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
1392error_pin:
1393 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
1394error_reserve:
1395 amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
1396error_create:
1397 adev->fw_vram_usage.va = NULL;
1398 adev->fw_vram_usage.reserved_bo = NULL;
1399 return r;
1400}
1401
1402int amdgpu_ttm_init(struct amdgpu_device *adev)
1403{
1404 uint64_t gtt_size;
1405 int r;
1406 u64 vis_vram_limit;
1407
1408 r = amdgpu_ttm_global_init(adev);
1409 if (r) {
1410 return r;
1411 }
1412 /* No others user of address space so set it to 0 */
1413 r = ttm_bo_device_init(&adev->mman.bdev,
1414 adev->mman.bo_global_ref.ref.object,
1415 &amdgpu_bo_driver,
1416 adev->ddev->anon_inode->i_mapping,
1417 DRM_FILE_PAGE_OFFSET,
1418 adev->need_dma32);
1419 if (r) {
1420 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1421 return r;
1422 }
1423 adev->mman.initialized = true;
1424
1425 /* We opt to avoid OOM on system pages allocations */
1426 adev->mman.bdev.no_retry = true;
1427
1428 r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
1429 adev->gmc.real_vram_size >> PAGE_SHIFT);
1430 if (r) {
1431 DRM_ERROR("Failed initializing VRAM heap.\n");
1432 return r;
1433 }
1434
1435 /* Reduce size of CPU-visible VRAM if requested */
1436 vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1437 if (amdgpu_vis_vram_limit > 0 &&
1438 vis_vram_limit <= adev->gmc.visible_vram_size)
1439 adev->gmc.visible_vram_size = vis_vram_limit;
1440
1441 /* Change the size here instead of the init above so only lpfn is affected */
1442 amdgpu_ttm_set_buffer_funcs_status(adev, false);
1443#ifdef CONFIG_64BIT
1444 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1445 adev->gmc.visible_vram_size);
1446#endif
1447
1448 /*
1449 *The reserved vram for firmware must be pinned to the specified
1450 *place on the VRAM, so reserve it early.
1451 */
1452 r = amdgpu_ttm_fw_reserve_vram_init(adev);
1453 if (r) {
1454 return r;
1455 }
1456
1457 r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
1458 AMDGPU_GEM_DOMAIN_VRAM,
1459 &adev->stolen_vga_memory,
1460 NULL, NULL);
1461 if (r)
1462 return r;
1463 DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1464 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1465
1466 if (amdgpu_gtt_size == -1) {
1467 struct sysinfo si;
1468
1469 si_meminfo(&si);
1470 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1471 adev->gmc.mc_vram_size),
1472 ((uint64_t)si.totalram * si.mem_unit * 3/4));
1473 }
1474 else
1475 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
1476 r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
1477 if (r) {
1478 DRM_ERROR("Failed initializing GTT heap.\n");
1479 return r;
1480 }
1481 DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1482 (unsigned)(gtt_size / (1024 * 1024)));
1483
1484 adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
1485 adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
1486 adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
1487 adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
1488 adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
1489 adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
1490 adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
1491 adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
1492 adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
1493 /* GDS Memory */
1494 if (adev->gds.mem.total_size) {
1495 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
1496 adev->gds.mem.total_size >> PAGE_SHIFT);
1497 if (r) {
1498 DRM_ERROR("Failed initializing GDS heap.\n");
1499 return r;
1500 }
1501 }
1502
1503 /* GWS */
1504 if (adev->gds.gws.total_size) {
1505 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
1506 adev->gds.gws.total_size >> PAGE_SHIFT);
1507 if (r) {
1508 DRM_ERROR("Failed initializing gws heap.\n");
1509 return r;
1510 }
1511 }
1512
1513 /* OA */
1514 if (adev->gds.oa.total_size) {
1515 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
1516 adev->gds.oa.total_size >> PAGE_SHIFT);
1517 if (r) {
1518 DRM_ERROR("Failed initializing oa heap.\n");
1519 return r;
1520 }
1521 }
1522
1523 r = amdgpu_ttm_debugfs_init(adev);
1524 if (r) {
1525 DRM_ERROR("Failed to init debugfs\n");
1526 return r;
1527 }
1528 return 0;
1529}
1530
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	if (!adev->mman.initialized)
		return;

	amdgpu_ttm_debugfs_fini(adev);
	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
	amdgpu_ttm_fw_reserve_vram_fini(adev);
	if (adev->mman.aper_base_kaddr)
		iounmap(adev->mman.aper_base_kaddr);
	adev->mman.aper_base_kaddr = NULL;

	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
	if (adev->gds.mem.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
	if (adev->gds.gws.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
	if (adev->gds.oa.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	amdgpu_ttm_global_fini(adev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}

/**
 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
 *
 * @adev: amdgpu_device pointer
 * @enable: true when we can use buffer functions.
 *
 * Enable/disable use of buffer functions during suspend/resume. This should
 * only be called at bootup or when userspace isn't running.
 */
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
	uint64_t size;

	if (!adev->mman.initialized || adev->in_gpu_reset)
		return;

	/* this just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
	if (enable)
		size = adev->gmc.real_vram_size;
	else
		size = adev->gmc.visible_vram_size;
	man->size = size >> PAGE_SHIFT;
	adev->mman.buffer_funcs_enabled = enable;
}

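/**
 * amdgpu_mmap - map a BO from this device into user space
 *
 * @filp: DRM file pointer
 * @vma: vma describing the requested mapping
 *
 * Rejects page offsets below DRM_FILE_PAGE_OFFSET and hands everything
 * else over to ttm_bo_mmap() on this device's TTM BO device.
 */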
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct amdgpu_device *adev;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	adev = file_priv->minor->dev->dev_private;
	if (adev == NULL)
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}

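/**
 * amdgpu_map_buffer - temporarily map a BO through a GART window
 *
 * @bo: buffer object whose pages should be mapped
 * @mem: memory region the BO is placed in
 * @num_pages: number of pages to map
 * @offset: byte offset into the BO at which the mapping starts
 * @window: index of the GART window to use
 * @ring: ring used to submit the mapping job
 * @addr: returned GPU address of the mapped window
 *
 * Builds and submits a job that writes the page table entries for
 * @num_pages pages of @bo into GART window @window, making the pages
 * accessible to the GPU at the address returned in @addr.
 */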
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr)
{
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct amdgpu_device *adev = ring->adev;
	struct ttm_tt *ttm = bo->ttm;
	struct amdgpu_job *job;
	unsigned num_dw, num_bytes;
	dma_addr_t *dma_address;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t flags;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;

	num_dw = adev->mman.buffer_funcs->copy_num_dw;
	while (num_dw & 0x7)
		num_dw++;

	num_bytes = num_pages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = adev->gart.table_addr;
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
			    &job->ibs[0].ptr[num_dw]);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

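/**
 * amdgpu_copy_buffer - schedule a GPU copy between two GPU addresses
 *
 * @ring: ring to submit the copy on
 * @src_offset: source GPU address
 * @dst_offset: destination GPU address
 * @byte_count: number of bytes to copy
 * @resv: reservation object to synchronize with (may be NULL)
 * @fence: returned fence signaling completion of the copy
 * @direct_submit: submit the IB directly instead of through the scheduler
 * @vm_needs_flush: request a VM flush for the job
 *
 * Splits the copy into chunks of at most copy_max_bytes and emits them
 * into a single job which is either scheduled directly on @ring or
 * submitted through the mman entity.
 */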
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct reservation_object *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	if (direct_submit && !ring->ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;

	/* for IB padding */
	while (num_dw & 0x7)
		num_dw++;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	job->vm_needs_flush = vm_needs_flush;
	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit) {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
				       NULL, fence);
		job->fence = dma_fence_get(*fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->mman.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
		if (r)
			goto error_free;
	}

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

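/**
 * amdgpu_fill_buffer - fill a buffer object with a 32-bit pattern
 *
 * @bo: buffer object to fill
 * @src_data: 32-bit value written into every dword of the BO
 * @resv: reservation object to synchronize with (may be NULL)
 * @fence: returned fence signaling completion of the fill
 *
 * Walks the drm_mm nodes backing @bo and emits fill commands of at most
 * fill_max_bytes each, submitted as a single job on the buffer functions
 * ring.
 */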
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct reservation_object *resv,
		       struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	struct drm_mm_node *mm_node;
	unsigned long num_pages;
	unsigned int num_loops, num_dw;

	struct amdgpu_job *job;
	int r;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		r = amdgpu_ttm_alloc_gart(&bo->tbo);
		if (r)
			return r;
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;
	num_loops = 0;
	while (num_pages) {
		uint32_t byte_count = mm_node->size << PAGE_SHIFT;

		num_loops += DIV_ROUND_UP(byte_count, max_bytes);
		num_pages -= mm_node->size;
		++mm_node;
	}
	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;

	/* for IB padding */
	num_dw += 64;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;

	while (num_pages) {
		uint32_t byte_count = mm_node->size << PAGE_SHIFT;
		uint64_t dst_addr;

		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
		while (byte_count) {
			uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
						dst_addr, cur_size_in_bytes);

			dst_addr += cur_size_in_bytes;
			byte_count -= cur_size_in_bytes;
		}

		num_pages -= mm_node->size;
		++mm_node;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

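/**
 * amdgpu_mm_dump_table - dump the state of a TTM memory manager
 *
 * @m: seq_file to print into
 * @data: unused
 *
 * Looks up the memory type (VRAM or GTT) attached to the debugfs node and
 * prints the corresponding manager's state through its debug callback.
 */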
static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

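/**
 * amdgpu_ttm_vram_read - read handler for the amdgpu_vram debugfs file
 *
 * @f: open file handle
 * @buf: user space buffer the data is copied to
 * @size: number of bytes to read
 * @pos: offset into VRAM to read from
 *
 * Reads VRAM one dword at a time through the MM_INDEX/MM_DATA aperture.
 * Both @size and @pos must be 4-byte aligned.
 */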
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		value = RREG32_NO_KIQ(mmMM_DATA);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

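/**
 * amdgpu_ttm_vram_write - write handler for the amdgpu_vram debugfs file
 *
 * @f: open file handle
 * @buf: user space buffer the data is copied from
 * @size: number of bytes to write
 * @pos: offset into VRAM to write to
 *
 * Writes VRAM one dword at a time through the MM_INDEX/MM_DATA aperture.
 * Both @size and @pos must be 4-byte aligned.
 */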
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		WREG32_NO_KIQ(mmMM_DATA, value);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS

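/**
 * amdgpu_ttm_gtt_read - read handler for the amdgpu_gtt debugfs file
 *
 * @f: open file handle
 * @buf: user space buffer the data is copied to
 * @size: number of bytes to read
 * @pos: offset into the GART to read from
 *
 * Copies the contents of the pages currently bound in the GART to user
 * space; unbound pages read back as zeroes.
 */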
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(adev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

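/**
 * amdgpu_iomem_read - read handler for the amdgpu_iomem debugfs file
 *
 * @f: open file handle
 * @buf: user space buffer the data is copied to
 * @size: number of bytes to read
 * @pos: DMA address to read from
 *
 * Translates @pos through the device's IOMMU domain (when one exists),
 * checks that the backing page actually belongs to this TTM device and
 * copies its contents to user space.
 */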
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_to_user(buf, ptr + off, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

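/**
 * amdgpu_iomem_write - write handler for the amdgpu_iomem debugfs file
 *
 * @f: open file handle
 * @buf: user space buffer the data is copied from
 * @size: number of bytes to write
 * @pos: DMA address to write to
 *
 * Translates @pos through the device's IOMMU domain (when one exists),
 * checks that the backing page actually belongs to this TTM device and
 * copies the user space data into it.
 */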
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
				  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_from_user(ptr + off, buf, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};

static const struct {
	char *name;
	const struct file_operations *fops;
	int domain;
} ttm_debugfs_entries[] = {
	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
};

#endif

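/**
 * amdgpu_ttm_debugfs_init - create the TTM debugfs entries
 *
 * @adev: amdgpu_device pointer
 *
 * Creates the files listed in ttm_debugfs_entries (amdgpu_vram,
 * amdgpu_iomem and, when CONFIG_DRM_AMDGPU_GART_DEBUGFS is set,
 * amdgpu_gtt), sizes the VRAM and GTT entries to match the hardware and
 * registers the memory manager dump list. A no-op when debugfs is not
 * configured.
 */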
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
		ent = debugfs_create_file(
				ttm_debugfs_entries[count].name,
				S_IFREG | S_IRUGO, root,
				adev,
				ttm_debugfs_entries[count].fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);
		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
			i_size_write(ent->d_inode, adev->gmc.gart_size);
		adev->mman.debugfs_entries[count] = ent;
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
	return 0;
#endif
}

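/**
 * amdgpu_ttm_debugfs_fini - remove the TTM debugfs entries
 *
 * @adev: amdgpu_device pointer
 *
 * Removes the files that amdgpu_ttm_debugfs_init() stored in
 * adev->mman.debugfs_entries.
 */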
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
		debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}