Loading...
1/*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30 * Dave Airlie
31 */
32
33#include <linux/dma-mapping.h>
34#include <linux/iommu.h>
35#include <linux/pagemap.h>
36#include <linux/sched/task.h>
37#include <linux/sched/mm.h>
38#include <linux/seq_file.h>
39#include <linux/slab.h>
40#include <linux/swap.h>
41#include <linux/swiotlb.h>
42#include <linux/dma-buf.h>
43#include <linux/sizes.h>
44
45#include <drm/ttm/ttm_bo_api.h>
46#include <drm/ttm/ttm_bo_driver.h>
47#include <drm/ttm/ttm_placement.h>
48#include <drm/ttm/ttm_range_manager.h>
49
50#include <drm/amdgpu_drm.h>
51
52#include "amdgpu.h"
53#include "amdgpu_object.h"
54#include "amdgpu_trace.h"
55#include "amdgpu_amdkfd.h"
56#include "amdgpu_sdma.h"
57#include "amdgpu_ras.h"
58#include "amdgpu_atomfirmware.h"
59#include "amdgpu_res_cursor.h"
60#include "bif/bif_4_1_d.h"
61
62#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
63
64static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
65 struct ttm_tt *ttm,
66 struct ttm_resource *bo_mem);
67static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
68 struct ttm_tt *ttm);
69
70static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
71 unsigned int type,
72 uint64_t size_in_page)
73{
74 return ttm_range_man_init(&adev->mman.bdev, type,
75 false, size_in_page);
76}
77
78/**
79 * amdgpu_evict_flags - Compute placement flags
80 *
81 * @bo: The buffer object to evict
82 * @placement: Possible destination(s) for evicted BO
83 *
84 * Fill in placement data when ttm_bo_evict() is called
85 */
86static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
87 struct ttm_placement *placement)
88{
89 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
90 struct amdgpu_bo *abo;
91 static const struct ttm_place placements = {
92 .fpfn = 0,
93 .lpfn = 0,
94 .mem_type = TTM_PL_SYSTEM,
95 .flags = 0
96 };
97
98 /* Don't handle scatter gather BOs */
99 if (bo->type == ttm_bo_type_sg) {
100 placement->num_placement = 0;
101 placement->num_busy_placement = 0;
102 return;
103 }
104
105 /* Object isn't an AMDGPU object so ignore */
106 if (!amdgpu_bo_is_amdgpu_bo(bo)) {
107 placement->placement = &placements;
108 placement->busy_placement = &placements;
109 placement->num_placement = 1;
110 placement->num_busy_placement = 1;
111 return;
112 }
113
114 abo = ttm_to_amdgpu_bo(bo);
115 if (abo->flags & AMDGPU_AMDKFD_CREATE_SVM_BO) {
116 struct dma_fence *fence;
117 struct dma_resv *resv = &bo->base._resv;
118
119 rcu_read_lock();
120 fence = rcu_dereference(resv->fence_excl);
121 if (fence && !fence->ops->signaled)
122 dma_fence_enable_sw_signaling(fence);
123
124 placement->num_placement = 0;
125 placement->num_busy_placement = 0;
126 rcu_read_unlock();
127 return;
128 }
129
130 switch (bo->resource->mem_type) {
131 case AMDGPU_PL_GDS:
132 case AMDGPU_PL_GWS:
133 case AMDGPU_PL_OA:
134 placement->num_placement = 0;
135 placement->num_busy_placement = 0;
136 return;
137
138 case TTM_PL_VRAM:
139 if (!adev->mman.buffer_funcs_enabled) {
140 /* Move to system memory */
141 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
142 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
143 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
144 amdgpu_bo_in_cpu_visible_vram(abo)) {
145
146 /* Try evicting to the CPU inaccessible part of VRAM
147 * first, but only set GTT as busy placement, so this
148 * BO will be evicted to GTT rather than causing other
149 * BOs to be evicted from VRAM
150 */
151 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
152 AMDGPU_GEM_DOMAIN_GTT);
153 abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
154 abo->placements[0].lpfn = 0;
155 abo->placement.busy_placement = &abo->placements[1];
156 abo->placement.num_busy_placement = 1;
157 } else {
158 /* Move to GTT memory */
159 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
160 }
161 break;
162 case TTM_PL_TT:
163 case AMDGPU_PL_PREEMPT:
164 default:
165 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
166 break;
167 }
168 *placement = abo->placement;
169}
170
171/**
172 * amdgpu_ttm_map_buffer - Map memory into the GART windows
173 * @bo: buffer object to map
174 * @mem: memory object to map
175 * @mm_cur: range to map
176 * @num_pages: number of pages to map
177 * @window: which GART window to use
178 * @ring: DMA ring to use for the copy
179 * @tmz: if we should setup a TMZ enabled mapping
180 * @addr: resulting address inside the MC address space
181 *
182 * Setup one of the GART windows to access a specific piece of memory or return
183 * the physical address for local memory.
184 */
185static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
186 struct ttm_resource *mem,
187 struct amdgpu_res_cursor *mm_cur,
188 unsigned num_pages, unsigned window,
189 struct amdgpu_ring *ring, bool tmz,
190 uint64_t *addr)
191{
192 struct amdgpu_device *adev = ring->adev;
193 struct amdgpu_job *job;
194 unsigned num_dw, num_bytes;
195 struct dma_fence *fence;
196 uint64_t src_addr, dst_addr;
197 void *cpu_addr;
198 uint64_t flags;
199 unsigned int i;
200 int r;
201
202 BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
203 AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
204 BUG_ON(mem->mem_type == AMDGPU_PL_PREEMPT);
205
206 /* Map only what can't be accessed directly */
207 if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
208 *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
209 mm_cur->start;
210 return 0;
211 }
212
213 *addr = adev->gmc.gart_start;
214 *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
215 AMDGPU_GPU_PAGE_SIZE;
216 *addr += mm_cur->start & ~PAGE_MASK;
217
218 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
219 num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
220
221 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
222 AMDGPU_IB_POOL_DELAYED, &job);
223 if (r)
224 return r;
225
226 src_addr = num_dw * 4;
227 src_addr += job->ibs[0].gpu_addr;
228
229 dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
230 dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
231 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
232 dst_addr, num_bytes, false);
233
234 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
235 WARN_ON(job->ibs[0].length_dw > num_dw);
236
237 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
238 if (tmz)
239 flags |= AMDGPU_PTE_TMZ;
240
241 cpu_addr = &job->ibs[0].ptr[num_dw];
242
243 if (mem->mem_type == TTM_PL_TT) {
244 dma_addr_t *dma_addr;
245
246 dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
247 r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags,
248 cpu_addr);
249 if (r)
250 goto error_free;
251 } else {
252 dma_addr_t dma_address;
253
254 dma_address = mm_cur->start;
255 dma_address += adev->vm_manager.vram_base_offset;
256
257 for (i = 0; i < num_pages; ++i) {
258 r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
259 &dma_address, flags, cpu_addr);
260 if (r)
261 goto error_free;
262
263 dma_address += PAGE_SIZE;
264 }
265 }
266
267 r = amdgpu_job_submit(job, &adev->mman.entity,
268 AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
269 if (r)
270 goto error_free;
271
272 dma_fence_put(fence);
273
274 return r;
275
276error_free:
277 amdgpu_job_free(job);
278 return r;
279}
280
281/**
282 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
283 * @adev: amdgpu device
284 * @src: buffer/address where to read from
285 * @dst: buffer/address where to write to
286 * @size: number of bytes to copy
287 * @tmz: if a secure copy should be used
288 * @resv: resv object to sync to
289 * @f: Returns the last fence if multiple jobs are submitted.
290 *
291 * The function copies @size bytes from {src->mem + src->offset} to
292 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
293 * move and different for a BO to BO copy.
294 *
295 */
296int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
297 const struct amdgpu_copy_mem *src,
298 const struct amdgpu_copy_mem *dst,
299 uint64_t size, bool tmz,
300 struct dma_resv *resv,
301 struct dma_fence **f)
302{
303 const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
304 AMDGPU_GPU_PAGE_SIZE);
305
306 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
307 struct amdgpu_res_cursor src_mm, dst_mm;
308 struct dma_fence *fence = NULL;
309 int r = 0;
310
311 if (!adev->mman.buffer_funcs_enabled) {
312 DRM_ERROR("Trying to move memory with ring turned off.\n");
313 return -EINVAL;
314 }
315
316 amdgpu_res_first(src->mem, src->offset, size, &src_mm);
317 amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
318
319 mutex_lock(&adev->mman.gtt_window_lock);
320 while (src_mm.remaining) {
321 uint32_t src_page_offset = src_mm.start & ~PAGE_MASK;
322 uint32_t dst_page_offset = dst_mm.start & ~PAGE_MASK;
323 struct dma_fence *next;
324 uint32_t cur_size;
325 uint64_t from, to;
326
327 /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
328 * begins at an offset, then adjust the size accordingly
329 */
330 cur_size = max(src_page_offset, dst_page_offset);
331 cur_size = min(min3(src_mm.size, dst_mm.size, size),
332 (uint64_t)(GTT_MAX_BYTES - cur_size));
333
334 /* Map src to window 0 and dst to window 1. */
335 r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
336 PFN_UP(cur_size + src_page_offset),
337 0, ring, tmz, &from);
338 if (r)
339 goto error;
340
341 r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
342 PFN_UP(cur_size + dst_page_offset),
343 1, ring, tmz, &to);
344 if (r)
345 goto error;
346
347 r = amdgpu_copy_buffer(ring, from, to, cur_size,
348 resv, &next, false, true, tmz);
349 if (r)
350 goto error;
351
352 dma_fence_put(fence);
353 fence = next;
354
355 amdgpu_res_next(&src_mm, cur_size);
356 amdgpu_res_next(&dst_mm, cur_size);
357 }
358error:
359 mutex_unlock(&adev->mman.gtt_window_lock);
360 if (f)
361 *f = dma_fence_get(fence);
362 dma_fence_put(fence);
363 return r;
364}
365
366/*
367 * amdgpu_move_blit - Copy an entire buffer to another buffer
368 *
369 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
370 * help move buffers to and from VRAM.
371 */
372static int amdgpu_move_blit(struct ttm_buffer_object *bo,
373 bool evict,
374 struct ttm_resource *new_mem,
375 struct ttm_resource *old_mem)
376{
377 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
378 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
379 struct amdgpu_copy_mem src, dst;
380 struct dma_fence *fence = NULL;
381 int r;
382
383 src.bo = bo;
384 dst.bo = bo;
385 src.mem = old_mem;
386 dst.mem = new_mem;
387 src.offset = 0;
388 dst.offset = 0;
389
390 r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
391 new_mem->num_pages << PAGE_SHIFT,
392 amdgpu_bo_encrypted(abo),
393 bo->base.resv, &fence);
394 if (r)
395 goto error;
396
397 /* clear the space being freed */
398 if (old_mem->mem_type == TTM_PL_VRAM &&
399 (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
400 struct dma_fence *wipe_fence = NULL;
401
402 r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
403 NULL, &wipe_fence);
404 if (r) {
405 goto error;
406 } else if (wipe_fence) {
407 dma_fence_put(fence);
408 fence = wipe_fence;
409 }
410 }
411
412 /* Always block for VM page tables before committing the new location */
413 if (bo->type == ttm_bo_type_kernel)
414 r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
415 else
416 r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
417 dma_fence_put(fence);
418 return r;
419
420error:
421 if (fence)
422 dma_fence_wait(fence, false);
423 dma_fence_put(fence);
424 return r;
425}
426
427/*
428 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
429 *
430 * Called by amdgpu_bo_move()
431 */
432static bool amdgpu_mem_visible(struct amdgpu_device *adev,
433 struct ttm_resource *mem)
434{
435 uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
436 struct amdgpu_res_cursor cursor;
437
438 if (mem->mem_type == TTM_PL_SYSTEM ||
439 mem->mem_type == TTM_PL_TT)
440 return true;
441 if (mem->mem_type != TTM_PL_VRAM)
442 return false;
443
444 amdgpu_res_first(mem, 0, mem_size, &cursor);
445
446 /* ttm_resource_ioremap only supports contiguous memory */
447 if (cursor.size != mem_size)
448 return false;
449
450 return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
451}
452
453/*
454 * amdgpu_bo_move - Move a buffer object to a new memory location
455 *
456 * Called by ttm_bo_handle_move_mem()
457 */
458static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
459 struct ttm_operation_ctx *ctx,
460 struct ttm_resource *new_mem,
461 struct ttm_place *hop)
462{
463 struct amdgpu_device *adev;
464 struct amdgpu_bo *abo;
465 struct ttm_resource *old_mem = bo->resource;
466 int r;
467
468 if (new_mem->mem_type == TTM_PL_TT ||
469 new_mem->mem_type == AMDGPU_PL_PREEMPT) {
470 r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
471 if (r)
472 return r;
473 }
474
475 /* Can't move a pinned BO */
476 abo = ttm_to_amdgpu_bo(bo);
477 if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
478 return -EINVAL;
479
480 adev = amdgpu_ttm_adev(bo->bdev);
481
482 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
483 ttm_bo_move_null(bo, new_mem);
484 goto out;
485 }
486 if (old_mem->mem_type == TTM_PL_SYSTEM &&
487 (new_mem->mem_type == TTM_PL_TT ||
488 new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
489 ttm_bo_move_null(bo, new_mem);
490 goto out;
491 }
492 if ((old_mem->mem_type == TTM_PL_TT ||
493 old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
494 new_mem->mem_type == TTM_PL_SYSTEM) {
495 r = ttm_bo_wait_ctx(bo, ctx);
496 if (r)
497 return r;
498
499 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
500 ttm_resource_free(bo, &bo->resource);
501 ttm_bo_assign_mem(bo, new_mem);
502 goto out;
503 }
504
505 if (old_mem->mem_type == AMDGPU_PL_GDS ||
506 old_mem->mem_type == AMDGPU_PL_GWS ||
507 old_mem->mem_type == AMDGPU_PL_OA ||
508 new_mem->mem_type == AMDGPU_PL_GDS ||
509 new_mem->mem_type == AMDGPU_PL_GWS ||
510 new_mem->mem_type == AMDGPU_PL_OA) {
511 /* Nothing to save here */
512 ttm_bo_move_null(bo, new_mem);
513 goto out;
514 }
515
516 if (bo->type == ttm_bo_type_device &&
517 new_mem->mem_type == TTM_PL_VRAM &&
518 old_mem->mem_type != TTM_PL_VRAM) {
519 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
520 * accesses the BO after it's moved.
521 */
522 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
523 }
524
525 if (adev->mman.buffer_funcs_enabled) {
526 if (((old_mem->mem_type == TTM_PL_SYSTEM &&
527 new_mem->mem_type == TTM_PL_VRAM) ||
528 (old_mem->mem_type == TTM_PL_VRAM &&
529 new_mem->mem_type == TTM_PL_SYSTEM))) {
530 hop->fpfn = 0;
531 hop->lpfn = 0;
532 hop->mem_type = TTM_PL_TT;
533 hop->flags = 0;
534 return -EMULTIHOP;
535 }
536
537 r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
538 } else {
539 r = -ENODEV;
540 }
541
542 if (r) {
543 /* Check that all memory is CPU accessible */
544 if (!amdgpu_mem_visible(adev, old_mem) ||
545 !amdgpu_mem_visible(adev, new_mem)) {
546 pr_err("Move buffer fallback to memcpy unavailable\n");
547 return r;
548 }
549
550 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
551 if (r)
552 return r;
553 }
554
555out:
556 /* update statistics */
557 atomic64_add(bo->base.size, &adev->num_bytes_moved);
558 amdgpu_bo_move_notify(bo, evict, new_mem);
559 return 0;
560}
561
562/*
563 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
564 *
565 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
566 */
567static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
568 struct ttm_resource *mem)
569{
570 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
571 size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
572
573 switch (mem->mem_type) {
574 case TTM_PL_SYSTEM:
575 /* system memory */
576 return 0;
577 case TTM_PL_TT:
578 case AMDGPU_PL_PREEMPT:
579 break;
580 case TTM_PL_VRAM:
581 mem->bus.offset = mem->start << PAGE_SHIFT;
582 /* check if it's visible */
583 if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
584 return -EINVAL;
585
586 if (adev->mman.aper_base_kaddr &&
587 mem->placement & TTM_PL_FLAG_CONTIGUOUS)
588 mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
589 mem->bus.offset;
590
591 mem->bus.offset += adev->gmc.aper_base;
592 mem->bus.is_iomem = true;
593 break;
594 default:
595 return -EINVAL;
596 }
597 return 0;
598}
599
600static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
601 unsigned long page_offset)
602{
603 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
604 struct amdgpu_res_cursor cursor;
605
606 amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
607 &cursor);
608 return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
609}
610
611/**
612 * amdgpu_ttm_domain_start - Returns GPU start address
613 * @adev: amdgpu device object
614 * @type: type of the memory
615 *
616 * Returns:
617 * GPU start address of a memory domain
618 */
619
620uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
621{
622 switch (type) {
623 case TTM_PL_TT:
624 return adev->gmc.gart_start;
625 case TTM_PL_VRAM:
626 return adev->gmc.vram_start;
627 }
628
629 return 0;
630}
631
632/*
633 * TTM backend functions.
634 */
635struct amdgpu_ttm_tt {
636 struct ttm_tt ttm;
637 struct drm_gem_object *gobj;
638 u64 offset;
639 uint64_t userptr;
640 struct task_struct *usertask;
641 uint32_t userflags;
642 bool bound;
643#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
644 struct hmm_range *range;
645#endif
646};
647
648#ifdef CONFIG_DRM_AMDGPU_USERPTR
649/*
650 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
651 * memory and start HMM tracking CPU page table update
652 *
653 * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
654 * once afterwards to stop HMM tracking
655 */
656int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
657{
658 struct ttm_tt *ttm = bo->tbo.ttm;
659 struct amdgpu_ttm_tt *gtt = (void *)ttm;
660 unsigned long start = gtt->userptr;
661 struct vm_area_struct *vma;
662 struct mm_struct *mm;
663 bool readonly;
664 int r = 0;
665
666 mm = bo->notifier.mm;
667 if (unlikely(!mm)) {
668 DRM_DEBUG_DRIVER("BO is not registered?\n");
669 return -EFAULT;
670 }
671
672 /* Another get_user_pages is running at the same time?? */
673 if (WARN_ON(gtt->range))
674 return -EFAULT;
675
676 if (!mmget_not_zero(mm)) /* Happens during process shutdown */
677 return -ESRCH;
678
679 mmap_read_lock(mm);
680 vma = vma_lookup(mm, start);
681 if (unlikely(!vma)) {
682 r = -EFAULT;
683 goto out_unlock;
684 }
685 if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
686 vma->vm_file)) {
687 r = -EPERM;
688 goto out_unlock;
689 }
690
691 readonly = amdgpu_ttm_tt_is_readonly(ttm);
692 r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
693 ttm->num_pages, >t->range, readonly,
694 true, NULL);
695out_unlock:
696 mmap_read_unlock(mm);
697 mmput(mm);
698
699 return r;
700}
701
702/*
703 * amdgpu_ttm_tt_userptr_range_done - stop HMM track the CPU page table change
704 * Check if the pages backing this ttm range have been invalidated
705 *
706 * Returns: true if pages are still valid
707 */
708bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
709{
710 struct amdgpu_ttm_tt *gtt = (void *)ttm;
711 bool r = false;
712
713 if (!gtt || !gtt->userptr)
714 return false;
715
716 DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
717 gtt->userptr, ttm->num_pages);
718
719 WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
720 "No user pages to check\n");
721
722 if (gtt->range) {
723 /*
724 * FIXME: Must always hold notifier_lock for this, and must
725 * not ignore the return code.
726 */
727 r = amdgpu_hmm_range_get_pages_done(gtt->range);
728 gtt->range = NULL;
729 }
730
731 return !r;
732}
733#endif
734
735/*
736 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
737 *
738 * Called by amdgpu_cs_list_validate(). This creates the page list
739 * that backs user memory and will ultimately be mapped into the device
740 * address space.
741 */
742void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
743{
744 unsigned long i;
745
746 for (i = 0; i < ttm->num_pages; ++i)
747 ttm->pages[i] = pages ? pages[i] : NULL;
748}
749
750/*
751 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
752 *
753 * Called by amdgpu_ttm_backend_bind()
754 **/
755static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
756 struct ttm_tt *ttm)
757{
758 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
759 struct amdgpu_ttm_tt *gtt = (void *)ttm;
760 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
761 enum dma_data_direction direction = write ?
762 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
763 int r;
764
765 /* Allocate an SG array and squash pages into it */
766 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
767 (u64)ttm->num_pages << PAGE_SHIFT,
768 GFP_KERNEL);
769 if (r)
770 goto release_sg;
771
772 /* Map SG to device */
773 r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
774 if (r)
775 goto release_sg;
776
777 /* convert SG to linear array of pages and dma addresses */
778 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
779 ttm->num_pages);
780
781 return 0;
782
783release_sg:
784 kfree(ttm->sg);
785 ttm->sg = NULL;
786 return r;
787}
788
789/*
790 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
791 */
792static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
793 struct ttm_tt *ttm)
794{
795 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
796 struct amdgpu_ttm_tt *gtt = (void *)ttm;
797 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
798 enum dma_data_direction direction = write ?
799 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
800
801 /* double check that we don't free the table twice */
802 if (!ttm->sg || !ttm->sg->sgl)
803 return;
804
805 /* unmap the pages mapped to the device */
806 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
807 sg_free_table(ttm->sg);
808
809#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
810 if (gtt->range) {
811 unsigned long i;
812
813 for (i = 0; i < ttm->num_pages; i++) {
814 if (ttm->pages[i] !=
815 hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
816 break;
817 }
818
819 WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
820 }
821#endif
822}
823
824static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
825 struct ttm_buffer_object *tbo,
826 uint64_t flags)
827{
828 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
829 struct ttm_tt *ttm = tbo->ttm;
830 struct amdgpu_ttm_tt *gtt = (void *)ttm;
831 int r;
832
833 if (amdgpu_bo_encrypted(abo))
834 flags |= AMDGPU_PTE_TMZ;
835
836 if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
837 uint64_t page_idx = 1;
838
839 r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
840 gtt->ttm.dma_address, flags);
841 if (r)
842 goto gart_bind_fail;
843
844 /* The memory type of the first page defaults to UC. Now
845 * modify the memory type to NC from the second page of
846 * the BO onward.
847 */
848 flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
849 flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
850
851 r = amdgpu_gart_bind(adev,
852 gtt->offset + (page_idx << PAGE_SHIFT),
853 ttm->num_pages - page_idx,
854 &(gtt->ttm.dma_address[page_idx]), flags);
855 } else {
856 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
857 gtt->ttm.dma_address, flags);
858 }
859
860gart_bind_fail:
861 if (r)
862 DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
863 ttm->num_pages, gtt->offset);
864
865 return r;
866}
867
868/*
869 * amdgpu_ttm_backend_bind - Bind GTT memory
870 *
871 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
872 * This handles binding GTT memory to the device address space.
873 */
874static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
875 struct ttm_tt *ttm,
876 struct ttm_resource *bo_mem)
877{
878 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
879 struct amdgpu_ttm_tt *gtt = (void*)ttm;
880 uint64_t flags;
881 int r = 0;
882
883 if (!bo_mem)
884 return -EINVAL;
885
886 if (gtt->bound)
887 return 0;
888
889 if (gtt->userptr) {
890 r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
891 if (r) {
892 DRM_ERROR("failed to pin userptr\n");
893 return r;
894 }
895 } else if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
896 if (!ttm->sg) {
897 struct dma_buf_attachment *attach;
898 struct sg_table *sgt;
899
900 attach = gtt->gobj->import_attach;
901 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
902 if (IS_ERR(sgt))
903 return PTR_ERR(sgt);
904
905 ttm->sg = sgt;
906 }
907
908 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
909 ttm->num_pages);
910 }
911
912 if (!ttm->num_pages) {
913 WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
914 ttm->num_pages, bo_mem, ttm);
915 }
916
917 if (bo_mem->mem_type == AMDGPU_PL_GDS ||
918 bo_mem->mem_type == AMDGPU_PL_GWS ||
919 bo_mem->mem_type == AMDGPU_PL_OA)
920 return -EINVAL;
921
922 if (bo_mem->mem_type != TTM_PL_TT ||
923 !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
924 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
925 return 0;
926 }
927
928 /* compute PTE flags relevant to this BO memory */
929 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
930
931 /* bind pages into GART page tables */
932 gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
933 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
934 gtt->ttm.dma_address, flags);
935
936 if (r)
937 DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
938 ttm->num_pages, gtt->offset);
939 gtt->bound = true;
940 return r;
941}
942
943/*
944 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
945 * through AGP or GART aperture.
946 *
947 * If bo is accessible through AGP aperture, then use AGP aperture
948 * to access bo; otherwise allocate logical space in GART aperture
949 * and map bo to GART aperture.
950 */
951int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
952{
953 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
954 struct ttm_operation_ctx ctx = { false, false };
955 struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
956 struct ttm_placement placement;
957 struct ttm_place placements;
958 struct ttm_resource *tmp;
959 uint64_t addr, flags;
960 int r;
961
962 if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
963 return 0;
964
965 addr = amdgpu_gmc_agp_addr(bo);
966 if (addr != AMDGPU_BO_INVALID_OFFSET) {
967 bo->resource->start = addr >> PAGE_SHIFT;
968 return 0;
969 }
970
971 /* allocate GART space */
972 placement.num_placement = 1;
973 placement.placement = &placements;
974 placement.num_busy_placement = 1;
975 placement.busy_placement = &placements;
976 placements.fpfn = 0;
977 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
978 placements.mem_type = TTM_PL_TT;
979 placements.flags = bo->resource->placement;
980
981 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
982 if (unlikely(r))
983 return r;
984
985 /* compute PTE flags for this buffer object */
986 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
987
988 /* Bind pages */
989 gtt->offset = (u64)tmp->start << PAGE_SHIFT;
990 r = amdgpu_ttm_gart_bind(adev, bo, flags);
991 if (unlikely(r)) {
992 ttm_resource_free(bo, &tmp);
993 return r;
994 }
995
996 amdgpu_gart_invalidate_tlb(adev);
997 ttm_resource_free(bo, &bo->resource);
998 ttm_bo_assign_mem(bo, tmp);
999
1000 return 0;
1001}
1002
1003/*
1004 * amdgpu_ttm_recover_gart - Rebind GTT pages
1005 *
1006 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
1007 * rebind GTT pages during a GPU reset.
1008 */
1009int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
1010{
1011 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1012 uint64_t flags;
1013 int r;
1014
1015 if (!tbo->ttm)
1016 return 0;
1017
1018 flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
1019 r = amdgpu_ttm_gart_bind(adev, tbo, flags);
1020
1021 return r;
1022}
1023
1024/*
1025 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
1026 *
1027 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
1028 * ttm_tt_destroy().
1029 */
1030static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
1031 struct ttm_tt *ttm)
1032{
1033 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1034 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1035 int r;
1036
1037 /* if the pages have userptr pinning then clear that first */
1038 if (gtt->userptr) {
1039 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
1040 } else if (ttm->sg && gtt->gobj->import_attach) {
1041 struct dma_buf_attachment *attach;
1042
1043 attach = gtt->gobj->import_attach;
1044 dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1045 ttm->sg = NULL;
1046 }
1047
1048 if (!gtt->bound)
1049 return;
1050
1051 if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1052 return;
1053
1054 /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1055 r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1056 if (r)
1057 DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
1058 gtt->ttm.num_pages, gtt->offset);
1059 gtt->bound = false;
1060}
1061
1062static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
1063 struct ttm_tt *ttm)
1064{
1065 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1066
1067 amdgpu_ttm_backend_unbind(bdev, ttm);
1068 ttm_tt_destroy_common(bdev, ttm);
1069 if (gtt->usertask)
1070 put_task_struct(gtt->usertask);
1071
1072 ttm_tt_fini(>t->ttm);
1073 kfree(gtt);
1074}
1075
1076/**
1077 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1078 *
1079 * @bo: The buffer object to create a GTT ttm_tt object around
1080 * @page_flags: Page flags to be added to the ttm_tt object
1081 *
1082 * Called by ttm_tt_create().
1083 */
1084static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1085 uint32_t page_flags)
1086{
1087 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1088 struct amdgpu_ttm_tt *gtt;
1089 enum ttm_caching caching;
1090
1091 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1092 if (gtt == NULL) {
1093 return NULL;
1094 }
1095 gtt->gobj = &bo->base;
1096
1097 if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1098 caching = ttm_write_combined;
1099 else
1100 caching = ttm_cached;
1101
1102 /* allocate space for the uninitialized page entries */
1103 if (ttm_sg_tt_init(>t->ttm, bo, page_flags, caching)) {
1104 kfree(gtt);
1105 return NULL;
1106 }
1107 return >t->ttm;
1108}
1109
1110/*
1111 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1112 *
1113 * Map the pages of a ttm_tt object to an address space visible
1114 * to the underlying device.
1115 */
1116static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
1117 struct ttm_tt *ttm,
1118 struct ttm_operation_ctx *ctx)
1119{
1120 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1121 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1122
1123 /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1124 if (gtt && gtt->userptr) {
1125 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1126 if (!ttm->sg)
1127 return -ENOMEM;
1128 return 0;
1129 }
1130
1131 if (ttm->page_flags & TTM_PAGE_FLAG_SG)
1132 return 0;
1133
1134 return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
1135}
1136
1137/*
1138 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1139 *
1140 * Unmaps pages of a ttm_tt object from the device address space and
1141 * unpopulates the page array backing it.
1142 */
1143static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
1144 struct ttm_tt *ttm)
1145{
1146 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1147 struct amdgpu_device *adev;
1148
1149 if (gtt && gtt->userptr) {
1150 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1151 kfree(ttm->sg);
1152 ttm->sg = NULL;
1153 return;
1154 }
1155
1156 if (ttm->page_flags & TTM_PAGE_FLAG_SG)
1157 return;
1158
1159 adev = amdgpu_ttm_adev(bdev);
1160 return ttm_pool_free(&adev->mman.bdev.pool, ttm);
1161}
1162
1163/**
1164 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1165 * task
1166 *
1167 * @bo: The ttm_buffer_object to bind this userptr to
1168 * @addr: The address in the current tasks VM space to use
1169 * @flags: Requirements of userptr object.
1170 *
1171 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
1172 * to current task
1173 */
1174int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
1175 uint64_t addr, uint32_t flags)
1176{
1177 struct amdgpu_ttm_tt *gtt;
1178
1179 if (!bo->ttm) {
1180 /* TODO: We want a separate TTM object type for userptrs */
1181 bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1182 if (bo->ttm == NULL)
1183 return -ENOMEM;
1184 }
1185
1186 /* Set TTM_PAGE_FLAG_SG before populate but after create. */
1187 bo->ttm->page_flags |= TTM_PAGE_FLAG_SG;
1188
1189 gtt = (void *)bo->ttm;
1190 gtt->userptr = addr;
1191 gtt->userflags = flags;
1192
1193 if (gtt->usertask)
1194 put_task_struct(gtt->usertask);
1195 gtt->usertask = current->group_leader;
1196 get_task_struct(gtt->usertask);
1197
1198 return 0;
1199}
1200
1201/*
1202 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1203 */
1204struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1205{
1206 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1207
1208 if (gtt == NULL)
1209 return NULL;
1210
1211 if (gtt->usertask == NULL)
1212 return NULL;
1213
1214 return gtt->usertask->mm;
1215}
1216
1217/*
1218 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an
1219 * address range for the current task.
1220 *
1221 */
1222bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1223 unsigned long end)
1224{
1225 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1226 unsigned long size;
1227
1228 if (gtt == NULL || !gtt->userptr)
1229 return false;
1230
1231 /* Return false if no part of the ttm_tt object lies within
1232 * the range
1233 */
1234 size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
1235 if (gtt->userptr > end || gtt->userptr + size <= start)
1236 return false;
1237
1238 return true;
1239}
1240
1241/*
1242 * amdgpu_ttm_tt_is_userptr - Have the pages backing by userptr?
1243 */
1244bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1245{
1246 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1247
1248 if (gtt == NULL || !gtt->userptr)
1249 return false;
1250
1251 return true;
1252}
1253
1254/*
1255 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1256 */
1257bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1258{
1259 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1260
1261 if (gtt == NULL)
1262 return false;
1263
1264 return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1265}
1266
1267/**
1268 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1269 *
1270 * @ttm: The ttm_tt object to compute the flags for
1271 * @mem: The memory registry backing this ttm_tt object
1272 *
1273 * Figure out the flags to use for a VM PDE (Page Directory Entry).
1274 */
1275uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1276{
1277 uint64_t flags = 0;
1278
1279 if (mem && mem->mem_type != TTM_PL_SYSTEM)
1280 flags |= AMDGPU_PTE_VALID;
1281
1282 if (mem && (mem->mem_type == TTM_PL_TT ||
1283 mem->mem_type == AMDGPU_PL_PREEMPT)) {
1284 flags |= AMDGPU_PTE_SYSTEM;
1285
1286 if (ttm->caching == ttm_cached)
1287 flags |= AMDGPU_PTE_SNOOPED;
1288 }
1289
1290 if (mem && mem->mem_type == TTM_PL_VRAM &&
1291 mem->bus.caching == ttm_cached)
1292 flags |= AMDGPU_PTE_SNOOPED;
1293
1294 return flags;
1295}
1296
1297/**
1298 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1299 *
1300 * @adev: amdgpu_device pointer
1301 * @ttm: The ttm_tt object to compute the flags for
1302 * @mem: The memory registry backing this ttm_tt object
1303 *
1304 * Figure out the flags to use for a VM PTE (Page Table Entry).
1305 */
1306uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1307 struct ttm_resource *mem)
1308{
1309 uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1310
1311 flags |= adev->gart.gart_pte_flags;
1312 flags |= AMDGPU_PTE_READABLE;
1313
1314 if (!amdgpu_ttm_tt_is_readonly(ttm))
1315 flags |= AMDGPU_PTE_WRITEABLE;
1316
1317 return flags;
1318}
1319
1320/*
1321 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1322 * object.
1323 *
1324 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1325 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1326 * it can find space for a new object and by ttm_bo_force_list_clean() which is
1327 * used to clean out a memory space.
1328 */
1329static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1330 const struct ttm_place *place)
1331{
1332 unsigned long num_pages = bo->resource->num_pages;
1333 struct amdgpu_res_cursor cursor;
1334 struct dma_resv_list *flist;
1335 struct dma_fence *f;
1336 int i;
1337
1338 /* Swapout? */
1339 if (bo->resource->mem_type == TTM_PL_SYSTEM)
1340 return true;
1341
1342 if (bo->type == ttm_bo_type_kernel &&
1343 !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1344 return false;
1345
1346 /* If bo is a KFD BO, check if the bo belongs to the current process.
1347 * If true, then return false as any KFD process needs all its BOs to
1348 * be resident to run successfully
1349 */
1350 flist = dma_resv_shared_list(bo->base.resv);
1351 if (flist) {
1352 for (i = 0; i < flist->shared_count; ++i) {
1353 f = rcu_dereference_protected(flist->shared[i],
1354 dma_resv_held(bo->base.resv));
1355 if (amdkfd_fence_check_mm(f, current->mm))
1356 return false;
1357 }
1358 }
1359
1360 switch (bo->resource->mem_type) {
1361 case AMDGPU_PL_PREEMPT:
1362 /* Preemptible BOs don't own system resources managed by the
1363 * driver (pages, VRAM, GART space). They point to resources
1364 * owned by someone else (e.g. pageable memory in user mode
1365 * or a DMABuf). They are used in a preemptible context so we
1366 * can guarantee no deadlocks and good QoS in case of MMU
1367 * notifiers or DMABuf move notifiers from the resource owner.
1368 */
1369 return false;
1370 case TTM_PL_TT:
1371 if (amdgpu_bo_is_amdgpu_bo(bo) &&
1372 amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1373 return false;
1374 return true;
1375
1376 case TTM_PL_VRAM:
1377 /* Check each drm MM node individually */
1378 amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
1379 &cursor);
1380 while (cursor.remaining) {
1381 if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
1382 && !(place->lpfn &&
1383 place->lpfn <= PFN_DOWN(cursor.start)))
1384 return true;
1385
1386 amdgpu_res_next(&cursor, cursor.size);
1387 }
1388 return false;
1389
1390 default:
1391 break;
1392 }
1393
1394 return ttm_bo_eviction_valuable(bo, place);
1395}
1396
1397/**
1398 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
1399 *
1400 * @bo: The buffer object to read/write
1401 * @offset: Offset into buffer object
1402 * @buf: Secondary buffer to write/read from
1403 * @len: Length in bytes of access
1404 * @write: true if writing
1405 *
1406 * This is used to access VRAM that backs a buffer object via MMIO
1407 * access for debugging purposes.
1408 */
1409static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1410 unsigned long offset, void *buf, int len,
1411 int write)
1412{
1413 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1414 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1415 struct amdgpu_res_cursor cursor;
1416 unsigned long flags;
1417 uint32_t value = 0;
1418 int ret = 0;
1419
1420 if (bo->resource->mem_type != TTM_PL_VRAM)
1421 return -EIO;
1422
1423 amdgpu_res_first(bo->resource, offset, len, &cursor);
1424 while (cursor.remaining) {
1425 uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
1426 uint64_t bytes = 4 - (cursor.start & 3);
1427 uint32_t shift = (cursor.start & 3) * 8;
1428 uint32_t mask = 0xffffffff << shift;
1429
1430 if (cursor.size < bytes) {
1431 mask &= 0xffffffff >> (bytes - cursor.size) * 8;
1432 bytes = cursor.size;
1433 }
1434
1435 if (mask != 0xffffffff) {
1436 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1437 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1438 WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1439 value = RREG32_NO_KIQ(mmMM_DATA);
1440 if (write) {
1441 value &= ~mask;
1442 value |= (*(uint32_t *)buf << shift) & mask;
1443 WREG32_NO_KIQ(mmMM_DATA, value);
1444 }
1445 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1446 if (!write) {
1447 value = (value & mask) >> shift;
1448 memcpy(buf, &value, bytes);
1449 }
1450 } else {
1451 bytes = cursor.size & ~0x3ULL;
1452 amdgpu_device_vram_access(adev, cursor.start,
1453 (uint32_t *)buf, bytes,
1454 write);
1455 }
1456
1457 ret += bytes;
1458 buf = (uint8_t *)buf + bytes;
1459 amdgpu_res_next(&cursor, bytes);
1460 }
1461
1462 return ret;
1463}
1464
1465static void
1466amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
1467{
1468 amdgpu_bo_move_notify(bo, false, NULL);
1469}
1470
1471static struct ttm_device_funcs amdgpu_bo_driver = {
1472 .ttm_tt_create = &amdgpu_ttm_tt_create,
1473 .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1474 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1475 .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
1476 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1477 .evict_flags = &amdgpu_evict_flags,
1478 .move = &amdgpu_bo_move,
1479 .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
1480 .release_notify = &amdgpu_bo_release_notify,
1481 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1482 .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1483 .access_memory = &amdgpu_ttm_access_memory,
1484 .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
1485};
1486
1487/*
1488 * Firmware Reservation functions
1489 */
1490/**
1491 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1492 *
1493 * @adev: amdgpu_device pointer
1494 *
1495 * free fw reserved vram if it has been reserved.
1496 */
1497static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1498{
1499 amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
1500 NULL, &adev->mman.fw_vram_usage_va);
1501}
1502
1503/**
1504 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1505 *
1506 * @adev: amdgpu_device pointer
1507 *
1508 * create bo vram reservation from fw.
1509 */
1510static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1511{
1512 uint64_t vram_size = adev->gmc.visible_vram_size;
1513
1514 adev->mman.fw_vram_usage_va = NULL;
1515 adev->mman.fw_vram_usage_reserved_bo = NULL;
1516
1517 if (adev->mman.fw_vram_usage_size == 0 ||
1518 adev->mman.fw_vram_usage_size > vram_size)
1519 return 0;
1520
1521 return amdgpu_bo_create_kernel_at(adev,
1522 adev->mman.fw_vram_usage_start_offset,
1523 adev->mman.fw_vram_usage_size,
1524 AMDGPU_GEM_DOMAIN_VRAM,
1525 &adev->mman.fw_vram_usage_reserved_bo,
1526 &adev->mman.fw_vram_usage_va);
1527}
1528
1529/*
1530 * Memoy training reservation functions
1531 */
1532
1533/**
1534 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1535 *
1536 * @adev: amdgpu_device pointer
1537 *
1538 * free memory training reserved vram if it has been reserved.
1539 */
1540static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1541{
1542 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1543
1544 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1545 amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1546 ctx->c2p_bo = NULL;
1547
1548 return 0;
1549}
1550
1551static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
1552{
1553 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1554
1555 memset(ctx, 0, sizeof(*ctx));
1556
1557 ctx->c2p_train_data_offset =
1558 ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
1559 ctx->p2c_train_data_offset =
1560 (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1561 ctx->train_data_size =
1562 GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1563
1564 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1565 ctx->train_data_size,
1566 ctx->p2c_train_data_offset,
1567 ctx->c2p_train_data_offset);
1568}
1569
1570/*
1571 * reserve TMR memory at the top of VRAM which holds
1572 * IP Discovery data and is protected by PSP.
1573 */
1574static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
1575{
1576 int ret;
1577 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1578 bool mem_train_support = false;
1579
1580 if (!amdgpu_sriov_vf(adev)) {
1581 if (amdgpu_atomfirmware_mem_training_supported(adev))
1582 mem_train_support = true;
1583 else
1584 DRM_DEBUG("memory training does not support!\n");
1585 }
1586
1587 /*
1588 * Query reserved tmr size through atom firmwareinfo for Sienna_Cichlid and onwards for all
1589 * the use cases (IP discovery/G6 memory training/profiling/diagnostic data.etc)
1590 *
1591 * Otherwise, fallback to legacy approach to check and reserve tmr block for ip
1592 * discovery data and G6 memory training data respectively
1593 */
1594 adev->mman.discovery_tmr_size =
1595 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1596 if (!adev->mman.discovery_tmr_size)
1597 adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
1598
1599 if (mem_train_support) {
1600 /* reserve vram for mem train according to TMR location */
1601 amdgpu_ttm_training_data_block_init(adev);
1602 ret = amdgpu_bo_create_kernel_at(adev,
1603 ctx->c2p_train_data_offset,
1604 ctx->train_data_size,
1605 AMDGPU_GEM_DOMAIN_VRAM,
1606 &ctx->c2p_bo,
1607 NULL);
1608 if (ret) {
1609 DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
1610 amdgpu_ttm_training_reserve_vram_fini(adev);
1611 return ret;
1612 }
1613 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
1614 }
1615
1616 ret = amdgpu_bo_create_kernel_at(adev,
1617 adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
1618 adev->mman.discovery_tmr_size,
1619 AMDGPU_GEM_DOMAIN_VRAM,
1620 &adev->mman.discovery_memory,
1621 NULL);
1622 if (ret) {
1623 DRM_ERROR("alloc tmr failed(%d)!\n", ret);
1624 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1625 return ret;
1626 }
1627
1628 return 0;
1629}
1630
1631/*
1632 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1633 * gtt/vram related fields.
1634 *
1635 * This initializes all of the memory space pools that the TTM layer
1636 * will need such as the GTT space (system memory mapped to the device),
1637 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1638 * can be mapped per VMID.
1639 */
1640int amdgpu_ttm_init(struct amdgpu_device *adev)
1641{
1642 uint64_t gtt_size;
1643 int r;
1644 u64 vis_vram_limit;
1645
1646 mutex_init(&adev->mman.gtt_window_lock);
1647
1648 /* No others user of address space so set it to 0 */
1649 r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
1650 adev_to_drm(adev)->anon_inode->i_mapping,
1651 adev_to_drm(adev)->vma_offset_manager,
1652 adev->need_swiotlb,
1653 dma_addressing_limited(adev->dev));
1654 if (r) {
1655 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1656 return r;
1657 }
1658 adev->mman.initialized = true;
1659
1660 /* Initialize VRAM pool with all of VRAM divided into pages */
1661 r = amdgpu_vram_mgr_init(adev);
1662 if (r) {
1663 DRM_ERROR("Failed initializing VRAM heap.\n");
1664 return r;
1665 }
1666
1667 /* Reduce size of CPU-visible VRAM if requested */
1668 vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1669 if (amdgpu_vis_vram_limit > 0 &&
1670 vis_vram_limit <= adev->gmc.visible_vram_size)
1671 adev->gmc.visible_vram_size = vis_vram_limit;
1672
1673 /* Change the size here instead of the init above so only lpfn is affected */
1674 amdgpu_ttm_set_buffer_funcs_status(adev, false);
1675#ifdef CONFIG_64BIT
1676#ifdef CONFIG_X86
1677 if (adev->gmc.xgmi.connected_to_cpu)
1678 adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
1679 adev->gmc.visible_vram_size);
1680
1681 else
1682#endif
1683 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1684 adev->gmc.visible_vram_size);
1685#endif
1686
1687 /*
1688 *The reserved vram for firmware must be pinned to the specified
1689 *place on the VRAM, so reserve it early.
1690 */
1691 r = amdgpu_ttm_fw_reserve_vram_init(adev);
1692 if (r) {
1693 return r;
1694 }
1695
1696 /*
1697 * only NAVI10 and onwards ASIC support for IP discovery.
1698 * If IP discovery enabled, a block of memory should be
1699 * reserved for IP discovey.
1700 */
1701 if (adev->mman.discovery_bin) {
1702 r = amdgpu_ttm_reserve_tmr(adev);
1703 if (r)
1704 return r;
1705 }
1706
1707 /* allocate memory as required for VGA
1708 * This is used for VGA emulation and pre-OS scanout buffers to
1709 * avoid display artifacts while transitioning between pre-OS
1710 * and driver. */
1711 r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
1712 AMDGPU_GEM_DOMAIN_VRAM,
1713 &adev->mman.stolen_vga_memory,
1714 NULL);
1715 if (r)
1716 return r;
1717 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
1718 adev->mman.stolen_extended_size,
1719 AMDGPU_GEM_DOMAIN_VRAM,
1720 &adev->mman.stolen_extended_memory,
1721 NULL);
1722 if (r)
1723 return r;
1724 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
1725 adev->mman.stolen_reserved_size,
1726 AMDGPU_GEM_DOMAIN_VRAM,
1727 &adev->mman.stolen_reserved_memory,
1728 NULL);
1729 if (r)
1730 return r;
1731
1732 DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1733 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1734
1735 /* Compute GTT size, either bsaed on 3/4th the size of RAM size
1736 * or whatever the user passed on module init */
1737 if (amdgpu_gtt_size == -1) {
1738 struct sysinfo si;
1739
1740 si_meminfo(&si);
1741 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1742 adev->gmc.mc_vram_size),
1743 ((uint64_t)si.totalram * si.mem_unit * 3/4));
1744 }
1745 else
1746 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
1747
1748 /* Initialize GTT memory pool */
1749 r = amdgpu_gtt_mgr_init(adev, gtt_size);
1750 if (r) {
1751 DRM_ERROR("Failed initializing GTT heap.\n");
1752 return r;
1753 }
1754 DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1755 (unsigned)(gtt_size / (1024 * 1024)));
1756
1757 /* Initialize preemptible memory pool */
1758 r = amdgpu_preempt_mgr_init(adev);
1759 if (r) {
1760 DRM_ERROR("Failed initializing PREEMPT heap.\n");
1761 return r;
1762 }
1763
1764 /* Initialize various on-chip memory pools */
1765 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
1766 if (r) {
1767 DRM_ERROR("Failed initializing GDS heap.\n");
1768 return r;
1769 }
1770
1771 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
1772 if (r) {
1773 DRM_ERROR("Failed initializing gws heap.\n");
1774 return r;
1775 }
1776
1777 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
1778 if (r) {
1779 DRM_ERROR("Failed initializing oa heap.\n");
1780 return r;
1781 }
1782
1783 return 0;
1784}
1785
1786/*
1787 * amdgpu_ttm_fini - De-initialize the TTM memory pools
1788 */
1789void amdgpu_ttm_fini(struct amdgpu_device *adev)
1790{
1791 if (!adev->mman.initialized)
1792 return;
1793
1794 amdgpu_ttm_training_reserve_vram_fini(adev);
1795 /* return the stolen vga memory back to VRAM */
1796 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
1797 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
1798 /* return the IP Discovery TMR memory back to VRAM */
1799 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1800 if (adev->mman.stolen_reserved_size)
1801 amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
1802 NULL, NULL);
1803 amdgpu_ttm_fw_reserve_vram_fini(adev);
1804
1805 amdgpu_vram_mgr_fini(adev);
1806 amdgpu_gtt_mgr_fini(adev);
1807 amdgpu_preempt_mgr_fini(adev);
1808 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
1809 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
1810 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
1811 ttm_device_fini(&adev->mman.bdev);
1812 adev->mman.initialized = false;
1813 DRM_INFO("amdgpu: ttm finalized\n");
1814}
1815
1816/**
1817 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1818 *
1819 * @adev: amdgpu_device pointer
1820 * @enable: true when we can use buffer functions.
1821 *
1822 * Enable/disable use of buffer functions during suspend/resume. This should
1823 * only be called at bootup or when userspace isn't running.
1824 */
1825void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1826{
1827 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
1828 uint64_t size;
1829 int r;
1830
1831 if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
1832 adev->mman.buffer_funcs_enabled == enable)
1833 return;
1834
1835 if (enable) {
1836 struct amdgpu_ring *ring;
1837 struct drm_gpu_scheduler *sched;
1838
1839 ring = adev->mman.buffer_funcs_ring;
1840 sched = &ring->sched;
1841 r = drm_sched_entity_init(&adev->mman.entity,
1842 DRM_SCHED_PRIORITY_KERNEL, &sched,
1843 1, NULL);
1844 if (r) {
1845 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
1846 r);
1847 return;
1848 }
1849 } else {
1850 drm_sched_entity_destroy(&adev->mman.entity);
1851 dma_fence_put(man->move);
1852 man->move = NULL;
1853 }
1854
1855 /* this just adjusts TTM size idea, which sets lpfn to the correct value */
1856 if (enable)
1857 size = adev->gmc.real_vram_size;
1858 else
1859 size = adev->gmc.visible_vram_size;
1860 man->size = size >> PAGE_SHIFT;
1861 adev->mman.buffer_funcs_enabled = enable;
1862}
1863
1864int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
1865 uint64_t dst_offset, uint32_t byte_count,
1866 struct dma_resv *resv,
1867 struct dma_fence **fence, bool direct_submit,
1868 bool vm_needs_flush, bool tmz)
1869{
1870 enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
1871 AMDGPU_IB_POOL_DELAYED;
1872 struct amdgpu_device *adev = ring->adev;
1873 struct amdgpu_job *job;
1874
1875 uint32_t max_bytes;
1876 unsigned num_loops, num_dw;
1877 unsigned i;
1878 int r;
1879
1880 if (direct_submit && !ring->sched.ready) {
1881 DRM_ERROR("Trying to move memory with ring turned off.\n");
1882 return -EINVAL;
1883 }
1884
1885 max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
1886 num_loops = DIV_ROUND_UP(byte_count, max_bytes);
1887 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
1888
1889 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
1890 if (r)
1891 return r;
1892
1893 if (vm_needs_flush) {
1894 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
1895 adev->gmc.pdb0_bo : adev->gart.bo);
1896 job->vm_needs_flush = true;
1897 }
1898 if (resv) {
1899 r = amdgpu_sync_resv(adev, &job->sync, resv,
1900 AMDGPU_SYNC_ALWAYS,
1901 AMDGPU_FENCE_OWNER_UNDEFINED);
1902 if (r) {
1903 DRM_ERROR("sync failed (%d).\n", r);
1904 goto error_free;
1905 }
1906 }
1907
1908 for (i = 0; i < num_loops; i++) {
1909 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
1910
1911 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
1912 dst_offset, cur_size_in_bytes, tmz);
1913
1914 src_offset += cur_size_in_bytes;
1915 dst_offset += cur_size_in_bytes;
1916 byte_count -= cur_size_in_bytes;
1917 }
1918
1919 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1920 WARN_ON(job->ibs[0].length_dw > num_dw);
1921 if (direct_submit)
1922 r = amdgpu_job_submit_direct(job, ring, fence);
1923 else
1924 r = amdgpu_job_submit(job, &adev->mman.entity,
1925 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
1926 if (r)
1927 goto error_free;
1928
1929 return r;
1930
1931error_free:
1932 amdgpu_job_free(job);
1933 DRM_ERROR("Error scheduling IBs (%d)\n", r);
1934 return r;
1935}
1936
1937int amdgpu_fill_buffer(struct amdgpu_bo *bo,
1938 uint32_t src_data,
1939 struct dma_resv *resv,
1940 struct dma_fence **fence)
1941{
1942 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1943 uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
1944 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
1945
1946 struct amdgpu_res_cursor cursor;
1947 unsigned int num_loops, num_dw;
1948 uint64_t num_bytes;
1949
1950 struct amdgpu_job *job;
1951 int r;
1952
1953 if (!adev->mman.buffer_funcs_enabled) {
1954 DRM_ERROR("Trying to clear memory with ring turned off.\n");
1955 return -EINVAL;
1956 }
1957
1958 if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) {
1959 DRM_ERROR("Trying to clear preemptible memory.\n");
1960 return -EINVAL;
1961 }
1962
1963 if (bo->tbo.resource->mem_type == TTM_PL_TT) {
1964 r = amdgpu_ttm_alloc_gart(&bo->tbo);
1965 if (r)
1966 return r;
1967 }
1968
1969 num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT;
1970 num_loops = 0;
1971
1972 amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
1973 while (cursor.remaining) {
1974 num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
1975 amdgpu_res_next(&cursor, cursor.size);
1976 }
1977 num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
1978
1979 /* for IB padding */
1980 num_dw += 64;
1981
1982 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
1983 &job);
1984 if (r)
1985 return r;
1986
1987 if (resv) {
1988 r = amdgpu_sync_resv(adev, &job->sync, resv,
1989 AMDGPU_SYNC_ALWAYS,
1990 AMDGPU_FENCE_OWNER_UNDEFINED);
1991 if (r) {
1992 DRM_ERROR("sync failed (%d).\n", r);
1993 goto error_free;
1994 }
1995 }
1996
1997 amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
1998 while (cursor.remaining) {
1999 uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
2000 uint64_t dst_addr = cursor.start;
2001
2002 dst_addr += amdgpu_ttm_domain_start(adev,
2003 bo->tbo.resource->mem_type);
2004 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
2005 cur_size);
2006
2007 amdgpu_res_next(&cursor, cur_size);
2008 }
2009
2010 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2011 WARN_ON(job->ibs[0].length_dw > num_dw);
2012 r = amdgpu_job_submit(job, &adev->mman.entity,
2013 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2014 if (r)
2015 goto error_free;
2016
2017 return 0;
2018
2019error_free:
2020 amdgpu_job_free(job);
2021 return r;
2022}
2023
2024#if defined(CONFIG_DEBUG_FS)
2025
2026static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
2027{
2028 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2029 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2030 TTM_PL_VRAM);
2031 struct drm_printer p = drm_seq_file_printer(m);
2032
2033 man->func->debug(man, &p);
2034 return 0;
2035}
2036
2037static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
2038{
2039 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2040
2041 return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
2042}
2043
2044static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused)
2045{
2046 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2047 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2048 TTM_PL_TT);
2049 struct drm_printer p = drm_seq_file_printer(m);
2050
2051 man->func->debug(man, &p);
2052 return 0;
2053}
2054
2055static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused)
2056{
2057 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2058 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2059 AMDGPU_PL_GDS);
2060 struct drm_printer p = drm_seq_file_printer(m);
2061
2062 man->func->debug(man, &p);
2063 return 0;
2064}
2065
2066static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused)
2067{
2068 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2069 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2070 AMDGPU_PL_GWS);
2071 struct drm_printer p = drm_seq_file_printer(m);
2072
2073 man->func->debug(man, &p);
2074 return 0;
2075}
2076
2077static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused)
2078{
2079 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2080 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2081 AMDGPU_PL_OA);
2082 struct drm_printer p = drm_seq_file_printer(m);
2083
2084 man->func->debug(man, &p);
2085 return 0;
2086}
2087
2088DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table);
2089DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table);
2090DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table);
2091DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table);
2092DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table);
2093DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
2094
2095/*
2096 * amdgpu_ttm_vram_read - Linear read access to VRAM
2097 *
2098 * Accesses VRAM via MMIO for debugging purposes.
2099 */
2100static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2101 size_t size, loff_t *pos)
2102{
2103 struct amdgpu_device *adev = file_inode(f)->i_private;
2104 ssize_t result = 0;
2105
2106 if (size & 0x3 || *pos & 0x3)
2107 return -EINVAL;
2108
2109 if (*pos >= adev->gmc.mc_vram_size)
2110 return -ENXIO;
2111
2112 size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2113 while (size) {
2114 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2115 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2116
2117 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2118 if (copy_to_user(buf, value, bytes))
2119 return -EFAULT;
2120
2121 result += bytes;
2122 buf += bytes;
2123 *pos += bytes;
2124 size -= bytes;
2125 }
2126
2127 return result;
2128}
2129
2130/*
2131 * amdgpu_ttm_vram_write - Linear write access to VRAM
2132 *
2133 * Accesses VRAM via MMIO for debugging purposes.
2134 */
2135static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2136 size_t size, loff_t *pos)
2137{
2138 struct amdgpu_device *adev = file_inode(f)->i_private;
2139 ssize_t result = 0;
2140 int r;
2141
2142 if (size & 0x3 || *pos & 0x3)
2143 return -EINVAL;
2144
2145 if (*pos >= adev->gmc.mc_vram_size)
2146 return -ENXIO;
2147
2148 while (size) {
2149 unsigned long flags;
2150 uint32_t value;
2151
2152 if (*pos >= adev->gmc.mc_vram_size)
2153 return result;
2154
2155 r = get_user(value, (uint32_t *)buf);
2156 if (r)
2157 return r;
2158
2159 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
2160 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
2161 WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
2162 WREG32_NO_KIQ(mmMM_DATA, value);
2163 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
2164
2165 result += 4;
2166 buf += 4;
2167 *pos += 4;
2168 size -= 4;
2169 }
2170
2171 return result;
2172}
2173
2174static const struct file_operations amdgpu_ttm_vram_fops = {
2175 .owner = THIS_MODULE,
2176 .read = amdgpu_ttm_vram_read,
2177 .write = amdgpu_ttm_vram_write,
2178 .llseek = default_llseek,
2179};
2180
2181/*
2182 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2183 *
2184 * This function is used to read memory that has been mapped to the
2185 * GPU and the known addresses are not physical addresses but instead
2186 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2187 */
2188static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2189 size_t size, loff_t *pos)
2190{
2191 struct amdgpu_device *adev = file_inode(f)->i_private;
2192 struct iommu_domain *dom;
2193 ssize_t result = 0;
2194 int r;
2195
2196 /* retrieve the IOMMU domain if any for this device */
2197 dom = iommu_get_domain_for_dev(adev->dev);
2198
2199 while (size) {
2200 phys_addr_t addr = *pos & PAGE_MASK;
2201 loff_t off = *pos & ~PAGE_MASK;
2202 size_t bytes = PAGE_SIZE - off;
2203 unsigned long pfn;
2204 struct page *p;
2205 void *ptr;
2206
2207 bytes = bytes < size ? bytes : size;
2208
2209 /* Translate the bus address to a physical address. If
2210 * the domain is NULL it means there is no IOMMU active
2211 * and the address translation is the identity
2212 */
2213 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2214
2215 pfn = addr >> PAGE_SHIFT;
2216 if (!pfn_valid(pfn))
2217 return -EPERM;
2218
2219 p = pfn_to_page(pfn);
2220 if (p->mapping != adev->mman.bdev.dev_mapping)
2221 return -EPERM;
2222
2223 ptr = kmap(p);
2224 r = copy_to_user(buf, ptr + off, bytes);
2225 kunmap(p);
2226 if (r)
2227 return -EFAULT;
2228
2229 size -= bytes;
2230 *pos += bytes;
2231 result += bytes;
2232 }
2233
2234 return result;
2235}
2236
2237/*
2238 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2239 *
2240 * This function is used to write memory that has been mapped to the
2241 * GPU and the known addresses are not physical addresses but instead
2242 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2243 */
2244static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2245 size_t size, loff_t *pos)
2246{
2247 struct amdgpu_device *adev = file_inode(f)->i_private;
2248 struct iommu_domain *dom;
2249 ssize_t result = 0;
2250 int r;
2251
2252 dom = iommu_get_domain_for_dev(adev->dev);
2253
2254 while (size) {
2255 phys_addr_t addr = *pos & PAGE_MASK;
2256 loff_t off = *pos & ~PAGE_MASK;
2257 size_t bytes = PAGE_SIZE - off;
2258 unsigned long pfn;
2259 struct page *p;
2260 void *ptr;
2261
2262 bytes = bytes < size ? bytes : size;
2263
2264 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2265
2266 pfn = addr >> PAGE_SHIFT;
2267 if (!pfn_valid(pfn))
2268 return -EPERM;
2269
2270 p = pfn_to_page(pfn);
2271 if (p->mapping != adev->mman.bdev.dev_mapping)
2272 return -EPERM;
2273
2274 ptr = kmap(p);
2275 r = copy_from_user(ptr + off, buf, bytes);
2276 kunmap(p);
2277 if (r)
2278 return -EFAULT;
2279
2280 size -= bytes;
2281 *pos += bytes;
2282 result += bytes;
2283 }
2284
2285 return result;
2286}
2287
2288static const struct file_operations amdgpu_ttm_iomem_fops = {
2289 .owner = THIS_MODULE,
2290 .read = amdgpu_iomem_read,
2291 .write = amdgpu_iomem_write,
2292 .llseek = default_llseek
2293};
2294
2295#endif
2296
2297void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2298{
2299#if defined(CONFIG_DEBUG_FS)
2300 struct drm_minor *minor = adev_to_drm(adev)->primary;
2301 struct dentry *root = minor->debugfs_root;
2302
2303 debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
2304 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
2305 debugfs_create_file("amdgpu_iomem", 0444, root, adev,
2306 &amdgpu_ttm_iomem_fops);
2307 debugfs_create_file("amdgpu_vram_mm", 0444, root, adev,
2308 &amdgpu_mm_vram_table_fops);
2309 debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev,
2310 &amdgpu_mm_tt_table_fops);
2311 debugfs_create_file("amdgpu_gds_mm", 0444, root, adev,
2312 &amdgpu_mm_gds_table_fops);
2313 debugfs_create_file("amdgpu_gws_mm", 0444, root, adev,
2314 &amdgpu_mm_gws_table_fops);
2315 debugfs_create_file("amdgpu_oa_mm", 0444, root, adev,
2316 &amdgpu_mm_oa_table_fops);
2317 debugfs_create_file("ttm_page_pool", 0444, root, adev,
2318 &amdgpu_ttm_page_pool_fops);
2319#endif
2320}
1/*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30 * Dave Airlie
31 */
32#include <ttm/ttm_bo_api.h>
33#include <ttm/ttm_bo_driver.h>
34#include <ttm/ttm_placement.h>
35#include <ttm/ttm_module.h>
36#include <ttm/ttm_page_alloc.h>
37#include <drm/drmP.h>
38#include <drm/amdgpu_drm.h>
39#include <linux/seq_file.h>
40#include <linux/slab.h>
41#include <linux/swiotlb.h>
42#include <linux/swap.h>
43#include <linux/pagemap.h>
44#include <linux/debugfs.h>
45#include "amdgpu.h"
46#include "bif/bif_4_1_d.h"
47
48#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
49
50static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
51static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
52
53
54/*
55 * Global memory.
56 */
57static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
58{
59 return ttm_mem_global_init(ref->object);
60}
61
62static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
63{
64 ttm_mem_global_release(ref->object);
65}
66
67static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
68{
69 struct drm_global_reference *global_ref;
70 struct amdgpu_ring *ring;
71 struct amd_sched_rq *rq;
72 int r;
73
74 adev->mman.mem_global_referenced = false;
75 global_ref = &adev->mman.mem_global_ref;
76 global_ref->global_type = DRM_GLOBAL_TTM_MEM;
77 global_ref->size = sizeof(struct ttm_mem_global);
78 global_ref->init = &amdgpu_ttm_mem_global_init;
79 global_ref->release = &amdgpu_ttm_mem_global_release;
80 r = drm_global_item_ref(global_ref);
81 if (r) {
82 DRM_ERROR("Failed setting up TTM memory accounting "
83 "subsystem.\n");
84 goto error_mem;
85 }
86
87 adev->mman.bo_global_ref.mem_glob =
88 adev->mman.mem_global_ref.object;
89 global_ref = &adev->mman.bo_global_ref.ref;
90 global_ref->global_type = DRM_GLOBAL_TTM_BO;
91 global_ref->size = sizeof(struct ttm_bo_global);
92 global_ref->init = &ttm_bo_global_init;
93 global_ref->release = &ttm_bo_global_release;
94 r = drm_global_item_ref(global_ref);
95 if (r) {
96 DRM_ERROR("Failed setting up TTM BO subsystem.\n");
97 goto error_bo;
98 }
99
100 ring = adev->mman.buffer_funcs_ring;
101 rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
102 r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
103 rq, amdgpu_sched_jobs);
104 if (r) {
105 DRM_ERROR("Failed setting up TTM BO move run queue.\n");
106 goto error_entity;
107 }
108
109 adev->mman.mem_global_referenced = true;
110
111 return 0;
112
113error_entity:
114 drm_global_item_unref(&adev->mman.bo_global_ref.ref);
115error_bo:
116 drm_global_item_unref(&adev->mman.mem_global_ref);
117error_mem:
118 return r;
119}
120
121static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
122{
123 if (adev->mman.mem_global_referenced) {
124 amd_sched_entity_fini(adev->mman.entity.sched,
125 &adev->mman.entity);
126 drm_global_item_unref(&adev->mman.bo_global_ref.ref);
127 drm_global_item_unref(&adev->mman.mem_global_ref);
128 adev->mman.mem_global_referenced = false;
129 }
130}
131
132static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
133{
134 return 0;
135}
136
137static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
138 struct ttm_mem_type_manager *man)
139{
140 struct amdgpu_device *adev;
141
142 adev = amdgpu_ttm_adev(bdev);
143
144 switch (type) {
145 case TTM_PL_SYSTEM:
146 /* System memory */
147 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
148 man->available_caching = TTM_PL_MASK_CACHING;
149 man->default_caching = TTM_PL_FLAG_CACHED;
150 break;
151 case TTM_PL_TT:
152 man->func = &amdgpu_gtt_mgr_func;
153 man->gpu_offset = adev->mc.gtt_start;
154 man->available_caching = TTM_PL_MASK_CACHING;
155 man->default_caching = TTM_PL_FLAG_CACHED;
156 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
157 break;
158 case TTM_PL_VRAM:
159 /* "On-card" video ram */
160 man->func = &amdgpu_vram_mgr_func;
161 man->gpu_offset = adev->mc.vram_start;
162 man->flags = TTM_MEMTYPE_FLAG_FIXED |
163 TTM_MEMTYPE_FLAG_MAPPABLE;
164 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
165 man->default_caching = TTM_PL_FLAG_WC;
166 break;
167 case AMDGPU_PL_GDS:
168 case AMDGPU_PL_GWS:
169 case AMDGPU_PL_OA:
170 /* On-chip GDS memory*/
171 man->func = &ttm_bo_manager_func;
172 man->gpu_offset = 0;
173 man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
174 man->available_caching = TTM_PL_FLAG_UNCACHED;
175 man->default_caching = TTM_PL_FLAG_UNCACHED;
176 break;
177 default:
178 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
179 return -EINVAL;
180 }
181 return 0;
182}
183
184static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
185 struct ttm_placement *placement)
186{
187 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
188 struct amdgpu_bo *abo;
189 static struct ttm_place placements = {
190 .fpfn = 0,
191 .lpfn = 0,
192 .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
193 };
194 unsigned i;
195
196 if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
197 placement->placement = &placements;
198 placement->busy_placement = &placements;
199 placement->num_placement = 1;
200 placement->num_busy_placement = 1;
201 return;
202 }
203 abo = container_of(bo, struct amdgpu_bo, tbo);
204 switch (bo->mem.mem_type) {
205 case TTM_PL_VRAM:
206 if (adev->mman.buffer_funcs_ring->ready == false) {
207 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
208 } else {
209 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
210 for (i = 0; i < abo->placement.num_placement; ++i) {
211 if (!(abo->placements[i].flags &
212 TTM_PL_FLAG_TT))
213 continue;
214
215 if (abo->placements[i].lpfn)
216 continue;
217
218 /* set an upper limit to force directly
219 * allocating address space for the BO.
220 */
221 abo->placements[i].lpfn =
222 adev->mc.gtt_size >> PAGE_SHIFT;
223 }
224 }
225 break;
226 case TTM_PL_TT:
227 default:
228 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
229 }
230 *placement = abo->placement;
231}
232
233static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
234{
235 struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
236
237 if (amdgpu_ttm_tt_get_usermm(bo->ttm))
238 return -EPERM;
239 return drm_vma_node_verify_access(&abo->gem_base.vma_node,
240 filp->private_data);
241}
242
243static void amdgpu_move_null(struct ttm_buffer_object *bo,
244 struct ttm_mem_reg *new_mem)
245{
246 struct ttm_mem_reg *old_mem = &bo->mem;
247
248 BUG_ON(old_mem->mm_node != NULL);
249 *old_mem = *new_mem;
250 new_mem->mm_node = NULL;
251}
252
253static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
254 struct drm_mm_node *mm_node,
255 struct ttm_mem_reg *mem,
256 uint64_t *addr)
257{
258 int r;
259
260 switch (mem->mem_type) {
261 case TTM_PL_TT:
262 r = amdgpu_ttm_bind(bo, mem);
263 if (r)
264 return r;
265
266 case TTM_PL_VRAM:
267 *addr = mm_node->start << PAGE_SHIFT;
268 *addr += bo->bdev->man[mem->mem_type].gpu_offset;
269 break;
270 default:
271 DRM_ERROR("Unknown placement %d\n", mem->mem_type);
272 return -EINVAL;
273 }
274
275 return 0;
276}
277
278static int amdgpu_move_blit(struct ttm_buffer_object *bo,
279 bool evict, bool no_wait_gpu,
280 struct ttm_mem_reg *new_mem,
281 struct ttm_mem_reg *old_mem)
282{
283 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
284 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
285
286 struct drm_mm_node *old_mm, *new_mm;
287 uint64_t old_start, old_size, new_start, new_size;
288 unsigned long num_pages;
289 struct dma_fence *fence = NULL;
290 int r;
291
292 BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
293
294 if (!ring->ready) {
295 DRM_ERROR("Trying to move memory with ring turned off.\n");
296 return -EINVAL;
297 }
298
299 old_mm = old_mem->mm_node;
300 r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
301 if (r)
302 return r;
303 old_size = old_mm->size;
304
305
306 new_mm = new_mem->mm_node;
307 r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
308 if (r)
309 return r;
310 new_size = new_mm->size;
311
312 num_pages = new_mem->num_pages;
313 while (num_pages) {
314 unsigned long cur_pages = min(old_size, new_size);
315 struct dma_fence *next;
316
317 r = amdgpu_copy_buffer(ring, old_start, new_start,
318 cur_pages * PAGE_SIZE,
319 bo->resv, &next, false);
320 if (r)
321 goto error;
322
323 dma_fence_put(fence);
324 fence = next;
325
326 num_pages -= cur_pages;
327 if (!num_pages)
328 break;
329
330 old_size -= cur_pages;
331 if (!old_size) {
332 r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
333 &old_start);
334 if (r)
335 goto error;
336 old_size = old_mm->size;
337 } else {
338 old_start += cur_pages * PAGE_SIZE;
339 }
340
341 new_size -= cur_pages;
342 if (!new_size) {
343 r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
344 &new_start);
345 if (r)
346 goto error;
347
348 new_size = new_mm->size;
349 } else {
350 new_start += cur_pages * PAGE_SIZE;
351 }
352 }
353
354 r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
355 dma_fence_put(fence);
356 return r;
357
358error:
359 if (fence)
360 dma_fence_wait(fence, false);
361 dma_fence_put(fence);
362 return r;
363}
364
365static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
366 bool evict, bool interruptible,
367 bool no_wait_gpu,
368 struct ttm_mem_reg *new_mem)
369{
370 struct amdgpu_device *adev;
371 struct ttm_mem_reg *old_mem = &bo->mem;
372 struct ttm_mem_reg tmp_mem;
373 struct ttm_place placements;
374 struct ttm_placement placement;
375 int r;
376
377 adev = amdgpu_ttm_adev(bo->bdev);
378 tmp_mem = *new_mem;
379 tmp_mem.mm_node = NULL;
380 placement.num_placement = 1;
381 placement.placement = &placements;
382 placement.num_busy_placement = 1;
383 placement.busy_placement = &placements;
384 placements.fpfn = 0;
385 placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
386 placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
387 r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
388 interruptible, no_wait_gpu);
389 if (unlikely(r)) {
390 return r;
391 }
392
393 r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
394 if (unlikely(r)) {
395 goto out_cleanup;
396 }
397
398 r = ttm_tt_bind(bo->ttm, &tmp_mem);
399 if (unlikely(r)) {
400 goto out_cleanup;
401 }
402 r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
403 if (unlikely(r)) {
404 goto out_cleanup;
405 }
406 r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
407out_cleanup:
408 ttm_bo_mem_put(bo, &tmp_mem);
409 return r;
410}
411
412static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
413 bool evict, bool interruptible,
414 bool no_wait_gpu,
415 struct ttm_mem_reg *new_mem)
416{
417 struct amdgpu_device *adev;
418 struct ttm_mem_reg *old_mem = &bo->mem;
419 struct ttm_mem_reg tmp_mem;
420 struct ttm_placement placement;
421 struct ttm_place placements;
422 int r;
423
424 adev = amdgpu_ttm_adev(bo->bdev);
425 tmp_mem = *new_mem;
426 tmp_mem.mm_node = NULL;
427 placement.num_placement = 1;
428 placement.placement = &placements;
429 placement.num_busy_placement = 1;
430 placement.busy_placement = &placements;
431 placements.fpfn = 0;
432 placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
433 placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
434 r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
435 interruptible, no_wait_gpu);
436 if (unlikely(r)) {
437 return r;
438 }
439 r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
440 if (unlikely(r)) {
441 goto out_cleanup;
442 }
443 r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
444 if (unlikely(r)) {
445 goto out_cleanup;
446 }
447out_cleanup:
448 ttm_bo_mem_put(bo, &tmp_mem);
449 return r;
450}
451
452static int amdgpu_bo_move(struct ttm_buffer_object *bo,
453 bool evict, bool interruptible,
454 bool no_wait_gpu,
455 struct ttm_mem_reg *new_mem)
456{
457 struct amdgpu_device *adev;
458 struct amdgpu_bo *abo;
459 struct ttm_mem_reg *old_mem = &bo->mem;
460 int r;
461
462 /* Can't move a pinned BO */
463 abo = container_of(bo, struct amdgpu_bo, tbo);
464 if (WARN_ON_ONCE(abo->pin_count > 0))
465 return -EINVAL;
466
467 adev = amdgpu_ttm_adev(bo->bdev);
468
469 /* remember the eviction */
470 if (evict)
471 atomic64_inc(&adev->num_evictions);
472
473 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
474 amdgpu_move_null(bo, new_mem);
475 return 0;
476 }
477 if ((old_mem->mem_type == TTM_PL_TT &&
478 new_mem->mem_type == TTM_PL_SYSTEM) ||
479 (old_mem->mem_type == TTM_PL_SYSTEM &&
480 new_mem->mem_type == TTM_PL_TT)) {
481 /* bind is enough */
482 amdgpu_move_null(bo, new_mem);
483 return 0;
484 }
485 if (adev->mman.buffer_funcs == NULL ||
486 adev->mman.buffer_funcs_ring == NULL ||
487 !adev->mman.buffer_funcs_ring->ready) {
488 /* use memcpy */
489 goto memcpy;
490 }
491
492 if (old_mem->mem_type == TTM_PL_VRAM &&
493 new_mem->mem_type == TTM_PL_SYSTEM) {
494 r = amdgpu_move_vram_ram(bo, evict, interruptible,
495 no_wait_gpu, new_mem);
496 } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
497 new_mem->mem_type == TTM_PL_VRAM) {
498 r = amdgpu_move_ram_vram(bo, evict, interruptible,
499 no_wait_gpu, new_mem);
500 } else {
501 r = amdgpu_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
502 }
503
504 if (r) {
505memcpy:
506 r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
507 if (r) {
508 return r;
509 }
510 }
511
512 /* update statistics */
513 atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
514 return 0;
515}
516
517static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
518{
519 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
520 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
521
522 mem->bus.addr = NULL;
523 mem->bus.offset = 0;
524 mem->bus.size = mem->num_pages << PAGE_SHIFT;
525 mem->bus.base = 0;
526 mem->bus.is_iomem = false;
527 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
528 return -EINVAL;
529 switch (mem->mem_type) {
530 case TTM_PL_SYSTEM:
531 /* system memory */
532 return 0;
533 case TTM_PL_TT:
534 break;
535 case TTM_PL_VRAM:
536 mem->bus.offset = mem->start << PAGE_SHIFT;
537 /* check if it's visible */
538 if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
539 return -EINVAL;
540 mem->bus.base = adev->mc.aper_base;
541 mem->bus.is_iomem = true;
542#ifdef __alpha__
543 /*
544 * Alpha: use bus.addr to hold the ioremap() return,
545 * so we can modify bus.base below.
546 */
547 if (mem->placement & TTM_PL_FLAG_WC)
548 mem->bus.addr =
549 ioremap_wc(mem->bus.base + mem->bus.offset,
550 mem->bus.size);
551 else
552 mem->bus.addr =
553 ioremap_nocache(mem->bus.base + mem->bus.offset,
554 mem->bus.size);
555
556 /*
557 * Alpha: Use just the bus offset plus
558 * the hose/domain memory base for bus.base.
559 * It then can be used to build PTEs for VRAM
560 * access, as done in ttm_bo_vm_fault().
561 */
562 mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
563 adev->ddev->hose->dense_mem_base;
564#endif
565 break;
566 default:
567 return -EINVAL;
568 }
569 return 0;
570}
571
572static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
573{
574}
575
576/*
577 * TTM backend functions.
578 */
579struct amdgpu_ttm_gup_task_list {
580 struct list_head list;
581 struct task_struct *task;
582};
583
584struct amdgpu_ttm_tt {
585 struct ttm_dma_tt ttm;
586 struct amdgpu_device *adev;
587 u64 offset;
588 uint64_t userptr;
589 struct mm_struct *usermm;
590 uint32_t userflags;
591 spinlock_t guptasklock;
592 struct list_head guptasks;
593 atomic_t mmu_invalidations;
594 struct list_head list;
595};
596
597int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
598{
599 struct amdgpu_ttm_tt *gtt = (void *)ttm;
600 unsigned int flags = 0;
601 unsigned pinned = 0;
602 int r;
603
604 if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
605 flags |= FOLL_WRITE;
606
607 if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
608 /* check that we only use anonymous memory
609 to prevent problems with writeback */
610 unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
611 struct vm_area_struct *vma;
612
613 vma = find_vma(gtt->usermm, gtt->userptr);
614 if (!vma || vma->vm_file || vma->vm_end < end)
615 return -EPERM;
616 }
617
618 do {
619 unsigned num_pages = ttm->num_pages - pinned;
620 uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
621 struct page **p = pages + pinned;
622 struct amdgpu_ttm_gup_task_list guptask;
623
624 guptask.task = current;
625 spin_lock(>t->guptasklock);
626 list_add(&guptask.list, >t->guptasks);
627 spin_unlock(>t->guptasklock);
628
629 r = get_user_pages(userptr, num_pages, flags, p, NULL);
630
631 spin_lock(>t->guptasklock);
632 list_del(&guptask.list);
633 spin_unlock(>t->guptasklock);
634
635 if (r < 0)
636 goto release_pages;
637
638 pinned += r;
639
640 } while (pinned < ttm->num_pages);
641
642 return 0;
643
644release_pages:
645 release_pages(pages, pinned, 0);
646 return r;
647}
648
649/* prepare the sg table with the user pages */
650static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
651{
652 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
653 struct amdgpu_ttm_tt *gtt = (void *)ttm;
654 unsigned nents;
655 int r;
656
657 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
658 enum dma_data_direction direction = write ?
659 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
660
661 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
662 ttm->num_pages << PAGE_SHIFT,
663 GFP_KERNEL);
664 if (r)
665 goto release_sg;
666
667 r = -ENOMEM;
668 nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
669 if (nents != ttm->sg->nents)
670 goto release_sg;
671
672 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
673 gtt->ttm.dma_address, ttm->num_pages);
674
675 return 0;
676
677release_sg:
678 kfree(ttm->sg);
679 return r;
680}
681
682static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
683{
684 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
685 struct amdgpu_ttm_tt *gtt = (void *)ttm;
686 struct sg_page_iter sg_iter;
687
688 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
689 enum dma_data_direction direction = write ?
690 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
691
692 /* double check that we don't free the table twice */
693 if (!ttm->sg->sgl)
694 return;
695
696 /* free the sg table and pages again */
697 dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
698
699 for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
700 struct page *page = sg_page_iter_page(&sg_iter);
701 if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
702 set_page_dirty(page);
703
704 mark_page_accessed(page);
705 put_page(page);
706 }
707
708 sg_free_table(ttm->sg);
709}
710
711static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
712 struct ttm_mem_reg *bo_mem)
713{
714 struct amdgpu_ttm_tt *gtt = (void*)ttm;
715 int r;
716
717 if (gtt->userptr) {
718 r = amdgpu_ttm_tt_pin_userptr(ttm);
719 if (r) {
720 DRM_ERROR("failed to pin userptr\n");
721 return r;
722 }
723 }
724 if (!ttm->num_pages) {
725 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
726 ttm->num_pages, bo_mem, ttm);
727 }
728
729 if (bo_mem->mem_type == AMDGPU_PL_GDS ||
730 bo_mem->mem_type == AMDGPU_PL_GWS ||
731 bo_mem->mem_type == AMDGPU_PL_OA)
732 return -EINVAL;
733
734 return 0;
735}
736
737bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
738{
739 struct amdgpu_ttm_tt *gtt = (void *)ttm;
740
741 return gtt && !list_empty(>t->list);
742}
743
744int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
745{
746 struct ttm_tt *ttm = bo->ttm;
747 struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
748 uint32_t flags;
749 int r;
750
751 if (!ttm || amdgpu_ttm_is_bound(ttm))
752 return 0;
753
754 r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo,
755 NULL, bo_mem);
756 if (r) {
757 DRM_ERROR("Failed to allocate GTT address space (%d)\n", r);
758 return r;
759 }
760
761 flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
762 gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
763 r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
764 ttm->pages, gtt->ttm.dma_address, flags);
765
766 if (r) {
767 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
768 ttm->num_pages, gtt->offset);
769 return r;
770 }
771 spin_lock(>t->adev->gtt_list_lock);
772 list_add_tail(>t->list, >t->adev->gtt_list);
773 spin_unlock(>t->adev->gtt_list_lock);
774 return 0;
775}
776
777int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
778{
779 struct amdgpu_ttm_tt *gtt, *tmp;
780 struct ttm_mem_reg bo_mem;
781 uint32_t flags;
782 int r;
783
784 bo_mem.mem_type = TTM_PL_TT;
785 spin_lock(&adev->gtt_list_lock);
786 list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
787 flags = amdgpu_ttm_tt_pte_flags(gtt->adev, >t->ttm.ttm, &bo_mem);
788 r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
789 gtt->ttm.ttm.pages, gtt->ttm.dma_address,
790 flags);
791 if (r) {
792 spin_unlock(&adev->gtt_list_lock);
793 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
794 gtt->ttm.ttm.num_pages, gtt->offset);
795 return r;
796 }
797 }
798 spin_unlock(&adev->gtt_list_lock);
799 return 0;
800}
801
802static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
803{
804 struct amdgpu_ttm_tt *gtt = (void *)ttm;
805
806 if (gtt->userptr)
807 amdgpu_ttm_tt_unpin_userptr(ttm);
808
809 if (!amdgpu_ttm_is_bound(ttm))
810 return 0;
811
812 /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
813 if (gtt->adev->gart.ready)
814 amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
815
816 spin_lock(>t->adev->gtt_list_lock);
817 list_del_init(>t->list);
818 spin_unlock(>t->adev->gtt_list_lock);
819
820 return 0;
821}
822
823static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
824{
825 struct amdgpu_ttm_tt *gtt = (void *)ttm;
826
827 ttm_dma_tt_fini(>t->ttm);
828 kfree(gtt);
829}
830
831static struct ttm_backend_func amdgpu_backend_func = {
832 .bind = &amdgpu_ttm_backend_bind,
833 .unbind = &amdgpu_ttm_backend_unbind,
834 .destroy = &amdgpu_ttm_backend_destroy,
835};
836
837static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
838 unsigned long size, uint32_t page_flags,
839 struct page *dummy_read_page)
840{
841 struct amdgpu_device *adev;
842 struct amdgpu_ttm_tt *gtt;
843
844 adev = amdgpu_ttm_adev(bdev);
845
846 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
847 if (gtt == NULL) {
848 return NULL;
849 }
850 gtt->ttm.ttm.func = &amdgpu_backend_func;
851 gtt->adev = adev;
852 if (ttm_dma_tt_init(>t->ttm, bdev, size, page_flags, dummy_read_page)) {
853 kfree(gtt);
854 return NULL;
855 }
856 INIT_LIST_HEAD(>t->list);
857 return >t->ttm.ttm;
858}
859
860static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
861{
862 struct amdgpu_device *adev;
863 struct amdgpu_ttm_tt *gtt = (void *)ttm;
864 unsigned i;
865 int r;
866 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
867
868 if (ttm->state != tt_unpopulated)
869 return 0;
870
871 if (gtt && gtt->userptr) {
872 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
873 if (!ttm->sg)
874 return -ENOMEM;
875
876 ttm->page_flags |= TTM_PAGE_FLAG_SG;
877 ttm->state = tt_unbound;
878 return 0;
879 }
880
881 if (slave && ttm->sg) {
882 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
883 gtt->ttm.dma_address, ttm->num_pages);
884 ttm->state = tt_unbound;
885 return 0;
886 }
887
888 adev = amdgpu_ttm_adev(ttm->bdev);
889
890#ifdef CONFIG_SWIOTLB
891 if (swiotlb_nr_tbl()) {
892 return ttm_dma_populate(>t->ttm, adev->dev);
893 }
894#endif
895
896 r = ttm_pool_populate(ttm);
897 if (r) {
898 return r;
899 }
900
901 for (i = 0; i < ttm->num_pages; i++) {
902 gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
903 0, PAGE_SIZE,
904 PCI_DMA_BIDIRECTIONAL);
905 if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
906 while (i--) {
907 pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
908 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
909 gtt->ttm.dma_address[i] = 0;
910 }
911 ttm_pool_unpopulate(ttm);
912 return -EFAULT;
913 }
914 }
915 return 0;
916}
917
918static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
919{
920 struct amdgpu_device *adev;
921 struct amdgpu_ttm_tt *gtt = (void *)ttm;
922 unsigned i;
923 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
924
925 if (gtt && gtt->userptr) {
926 kfree(ttm->sg);
927 ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
928 return;
929 }
930
931 if (slave)
932 return;
933
934 adev = amdgpu_ttm_adev(ttm->bdev);
935
936#ifdef CONFIG_SWIOTLB
937 if (swiotlb_nr_tbl()) {
938 ttm_dma_unpopulate(>t->ttm, adev->dev);
939 return;
940 }
941#endif
942
943 for (i = 0; i < ttm->num_pages; i++) {
944 if (gtt->ttm.dma_address[i]) {
945 pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
946 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
947 }
948 }
949
950 ttm_pool_unpopulate(ttm);
951}
952
953int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
954 uint32_t flags)
955{
956 struct amdgpu_ttm_tt *gtt = (void *)ttm;
957
958 if (gtt == NULL)
959 return -EINVAL;
960
961 gtt->userptr = addr;
962 gtt->usermm = current->mm;
963 gtt->userflags = flags;
964 spin_lock_init(>t->guptasklock);
965 INIT_LIST_HEAD(>t->guptasks);
966 atomic_set(>t->mmu_invalidations, 0);
967
968 return 0;
969}
970
971struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
972{
973 struct amdgpu_ttm_tt *gtt = (void *)ttm;
974
975 if (gtt == NULL)
976 return NULL;
977
978 return gtt->usermm;
979}
980
981bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
982 unsigned long end)
983{
984 struct amdgpu_ttm_tt *gtt = (void *)ttm;
985 struct amdgpu_ttm_gup_task_list *entry;
986 unsigned long size;
987
988 if (gtt == NULL || !gtt->userptr)
989 return false;
990
991 size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
992 if (gtt->userptr > end || gtt->userptr + size <= start)
993 return false;
994
995 spin_lock(>t->guptasklock);
996 list_for_each_entry(entry, >t->guptasks, list) {
997 if (entry->task == current) {
998 spin_unlock(>t->guptasklock);
999 return false;
1000 }
1001 }
1002 spin_unlock(>t->guptasklock);
1003
1004 atomic_inc(>t->mmu_invalidations);
1005
1006 return true;
1007}
1008
1009bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
1010 int *last_invalidated)
1011{
1012 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1013 int prev_invalidated = *last_invalidated;
1014
1015 *last_invalidated = atomic_read(>t->mmu_invalidations);
1016 return prev_invalidated != *last_invalidated;
1017}
1018
1019bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1020{
1021 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1022
1023 if (gtt == NULL)
1024 return false;
1025
1026 return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1027}
1028
1029uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1030 struct ttm_mem_reg *mem)
1031{
1032 uint32_t flags = 0;
1033
1034 if (mem && mem->mem_type != TTM_PL_SYSTEM)
1035 flags |= AMDGPU_PTE_VALID;
1036
1037 if (mem && mem->mem_type == TTM_PL_TT) {
1038 flags |= AMDGPU_PTE_SYSTEM;
1039
1040 if (ttm->caching_state == tt_cached)
1041 flags |= AMDGPU_PTE_SNOOPED;
1042 }
1043
1044 if (adev->asic_type >= CHIP_TONGA)
1045 flags |= AMDGPU_PTE_EXECUTABLE;
1046
1047 flags |= AMDGPU_PTE_READABLE;
1048
1049 if (!amdgpu_ttm_tt_is_readonly(ttm))
1050 flags |= AMDGPU_PTE_WRITEABLE;
1051
1052 return flags;
1053}
1054
1055static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
1056{
1057 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1058 unsigned i, j;
1059
1060 for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
1061 struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
1062
1063 for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
1064 if (&tbo->lru == lru->lru[j])
1065 lru->lru[j] = tbo->lru.prev;
1066
1067 if (&tbo->swap == lru->swap_lru)
1068 lru->swap_lru = tbo->swap.prev;
1069 }
1070}
1071
1072static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
1073{
1074 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1075 unsigned log2_size = min(ilog2(tbo->num_pages),
1076 AMDGPU_TTM_LRU_SIZE - 1);
1077
1078 return &adev->mman.log2_size[log2_size];
1079}
1080
1081static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
1082{
1083 struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
1084 struct list_head *res = lru->lru[tbo->mem.mem_type];
1085
1086 lru->lru[tbo->mem.mem_type] = &tbo->lru;
1087 while ((++lru)->lru[tbo->mem.mem_type] == res)
1088 lru->lru[tbo->mem.mem_type] = &tbo->lru;
1089
1090 return res;
1091}
1092
1093static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
1094{
1095 struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
1096 struct list_head *res = lru->swap_lru;
1097
1098 lru->swap_lru = &tbo->swap;
1099 while ((++lru)->swap_lru == res)
1100 lru->swap_lru = &tbo->swap;
1101
1102 return res;
1103}
1104
1105static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1106 const struct ttm_place *place)
1107{
1108 if (bo->mem.mem_type == TTM_PL_VRAM &&
1109 bo->mem.start == AMDGPU_BO_INVALID_OFFSET) {
1110 unsigned long num_pages = bo->mem.num_pages;
1111 struct drm_mm_node *node = bo->mem.mm_node;
1112
1113 /* Check each drm MM node individually */
1114 while (num_pages) {
1115 if (place->fpfn < (node->start + node->size) &&
1116 !(place->lpfn && place->lpfn <= node->start))
1117 return true;
1118
1119 num_pages -= node->size;
1120 ++node;
1121 }
1122
1123 return false;
1124 }
1125
1126 return ttm_bo_eviction_valuable(bo, place);
1127}
1128
1129static struct ttm_bo_driver amdgpu_bo_driver = {
1130 .ttm_tt_create = &amdgpu_ttm_tt_create,
1131 .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1132 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1133 .invalidate_caches = &amdgpu_invalidate_caches,
1134 .init_mem_type = &amdgpu_init_mem_type,
1135 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1136 .evict_flags = &amdgpu_evict_flags,
1137 .move = &amdgpu_bo_move,
1138 .verify_access = &amdgpu_verify_access,
1139 .move_notify = &amdgpu_bo_move_notify,
1140 .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
1141 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1142 .io_mem_free = &amdgpu_ttm_io_mem_free,
1143 .lru_removal = &amdgpu_ttm_lru_removal,
1144 .lru_tail = &amdgpu_ttm_lru_tail,
1145 .swap_lru_tail = &amdgpu_ttm_swap_lru_tail,
1146};
1147
1148int amdgpu_ttm_init(struct amdgpu_device *adev)
1149{
1150 unsigned i, j;
1151 int r;
1152
1153 r = amdgpu_ttm_global_init(adev);
1154 if (r) {
1155 return r;
1156 }
1157 /* No others user of address space so set it to 0 */
1158 r = ttm_bo_device_init(&adev->mman.bdev,
1159 adev->mman.bo_global_ref.ref.object,
1160 &amdgpu_bo_driver,
1161 adev->ddev->anon_inode->i_mapping,
1162 DRM_FILE_PAGE_OFFSET,
1163 adev->need_dma32);
1164 if (r) {
1165 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1166 return r;
1167 }
1168
1169 for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
1170 struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
1171
1172 for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
1173 lru->lru[j] = &adev->mman.bdev.man[j].lru;
1174 lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
1175 }
1176
1177 for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
1178 adev->mman.guard.lru[j] = NULL;
1179 adev->mman.guard.swap_lru = NULL;
1180
1181 adev->mman.initialized = true;
1182 r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
1183 adev->mc.real_vram_size >> PAGE_SHIFT);
1184 if (r) {
1185 DRM_ERROR("Failed initializing VRAM heap.\n");
1186 return r;
1187 }
1188 /* Change the size here instead of the init above so only lpfn is affected */
1189 amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
1190
1191 r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
1192 AMDGPU_GEM_DOMAIN_VRAM,
1193 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
1194 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
1195 NULL, NULL, &adev->stollen_vga_memory);
1196 if (r) {
1197 return r;
1198 }
1199 r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
1200 if (r)
1201 return r;
1202 r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL);
1203 amdgpu_bo_unreserve(adev->stollen_vga_memory);
1204 if (r) {
1205 amdgpu_bo_unref(&adev->stollen_vga_memory);
1206 return r;
1207 }
1208 DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1209 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
1210 r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
1211 adev->mc.gtt_size >> PAGE_SHIFT);
1212 if (r) {
1213 DRM_ERROR("Failed initializing GTT heap.\n");
1214 return r;
1215 }
1216 DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1217 (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
1218
1219 adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
1220 adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
1221 adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
1222 adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
1223 adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
1224 adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
1225 adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
1226 adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
1227 adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
1228 /* GDS Memory */
1229 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
1230 adev->gds.mem.total_size >> PAGE_SHIFT);
1231 if (r) {
1232 DRM_ERROR("Failed initializing GDS heap.\n");
1233 return r;
1234 }
1235
1236 /* GWS */
1237 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
1238 adev->gds.gws.total_size >> PAGE_SHIFT);
1239 if (r) {
1240 DRM_ERROR("Failed initializing gws heap.\n");
1241 return r;
1242 }
1243
1244 /* OA */
1245 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
1246 adev->gds.oa.total_size >> PAGE_SHIFT);
1247 if (r) {
1248 DRM_ERROR("Failed initializing oa heap.\n");
1249 return r;
1250 }
1251
1252 r = amdgpu_ttm_debugfs_init(adev);
1253 if (r) {
1254 DRM_ERROR("Failed to init debugfs\n");
1255 return r;
1256 }
1257 return 0;
1258}
1259
1260void amdgpu_ttm_fini(struct amdgpu_device *adev)
1261{
1262 int r;
1263
1264 if (!adev->mman.initialized)
1265 return;
1266 amdgpu_ttm_debugfs_fini(adev);
1267 if (adev->stollen_vga_memory) {
1268 r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
1269 if (r == 0) {
1270 amdgpu_bo_unpin(adev->stollen_vga_memory);
1271 amdgpu_bo_unreserve(adev->stollen_vga_memory);
1272 }
1273 amdgpu_bo_unref(&adev->stollen_vga_memory);
1274 }
1275 ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
1276 ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
1277 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
1278 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
1279 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
1280 ttm_bo_device_release(&adev->mman.bdev);
1281 amdgpu_gart_fini(adev);
1282 amdgpu_ttm_global_fini(adev);
1283 adev->mman.initialized = false;
1284 DRM_INFO("amdgpu: ttm finalized\n");
1285}
1286
1287/* this should only be called at bootup or when userspace
1288 * isn't running */
1289void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
1290{
1291 struct ttm_mem_type_manager *man;
1292
1293 if (!adev->mman.initialized)
1294 return;
1295
1296 man = &adev->mman.bdev.man[TTM_PL_VRAM];
1297 /* this just adjusts TTM size idea, which sets lpfn to the correct value */
1298 man->size = size >> PAGE_SHIFT;
1299}
1300
1301int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
1302{
1303 struct drm_file *file_priv;
1304 struct amdgpu_device *adev;
1305
1306 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
1307 return -EINVAL;
1308
1309 file_priv = filp->private_data;
1310 adev = file_priv->minor->dev->dev_private;
1311 if (adev == NULL)
1312 return -EINVAL;
1313
1314 return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
1315}
1316
1317int amdgpu_copy_buffer(struct amdgpu_ring *ring,
1318 uint64_t src_offset,
1319 uint64_t dst_offset,
1320 uint32_t byte_count,
1321 struct reservation_object *resv,
1322 struct dma_fence **fence, bool direct_submit)
1323{
1324 struct amdgpu_device *adev = ring->adev;
1325 struct amdgpu_job *job;
1326
1327 uint32_t max_bytes;
1328 unsigned num_loops, num_dw;
1329 unsigned i;
1330 int r;
1331
1332 max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
1333 num_loops = DIV_ROUND_UP(byte_count, max_bytes);
1334 num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
1335
1336 /* for IB padding */
1337 while (num_dw & 0x7)
1338 num_dw++;
1339
1340 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
1341 if (r)
1342 return r;
1343
1344 if (resv) {
1345 r = amdgpu_sync_resv(adev, &job->sync, resv,
1346 AMDGPU_FENCE_OWNER_UNDEFINED);
1347 if (r) {
1348 DRM_ERROR("sync failed (%d).\n", r);
1349 goto error_free;
1350 }
1351 }
1352
1353 for (i = 0; i < num_loops; i++) {
1354 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
1355
1356 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
1357 dst_offset, cur_size_in_bytes);
1358
1359 src_offset += cur_size_in_bytes;
1360 dst_offset += cur_size_in_bytes;
1361 byte_count -= cur_size_in_bytes;
1362 }
1363
1364 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1365 WARN_ON(job->ibs[0].length_dw > num_dw);
1366 if (direct_submit) {
1367 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
1368 NULL, NULL, fence);
1369 job->fence = dma_fence_get(*fence);
1370 if (r)
1371 DRM_ERROR("Error scheduling IBs (%d)\n", r);
1372 amdgpu_job_free(job);
1373 } else {
1374 r = amdgpu_job_submit(job, ring, &adev->mman.entity,
1375 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
1376 if (r)
1377 goto error_free;
1378 }
1379
1380 return r;
1381
1382error_free:
1383 amdgpu_job_free(job);
1384 return r;
1385}
1386
1387int amdgpu_fill_buffer(struct amdgpu_bo *bo,
1388 uint32_t src_data,
1389 struct reservation_object *resv,
1390 struct dma_fence **fence)
1391{
1392 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1393 uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
1394 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
1395
1396 struct drm_mm_node *mm_node;
1397 unsigned long num_pages;
1398 unsigned int num_loops, num_dw;
1399
1400 struct amdgpu_job *job;
1401 int r;
1402
1403 if (!ring->ready) {
1404 DRM_ERROR("Trying to clear memory with ring turned off.\n");
1405 return -EINVAL;
1406 }
1407
1408 num_pages = bo->tbo.num_pages;
1409 mm_node = bo->tbo.mem.mm_node;
1410 num_loops = 0;
1411 while (num_pages) {
1412 uint32_t byte_count = mm_node->size << PAGE_SHIFT;
1413
1414 num_loops += DIV_ROUND_UP(byte_count, max_bytes);
1415 num_pages -= mm_node->size;
1416 ++mm_node;
1417 }
1418 num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
1419
1420 /* for IB padding */
1421 num_dw += 64;
1422
1423 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
1424 if (r)
1425 return r;
1426
1427 if (resv) {
1428 r = amdgpu_sync_resv(adev, &job->sync, resv,
1429 AMDGPU_FENCE_OWNER_UNDEFINED);
1430 if (r) {
1431 DRM_ERROR("sync failed (%d).\n", r);
1432 goto error_free;
1433 }
1434 }
1435
1436 num_pages = bo->tbo.num_pages;
1437 mm_node = bo->tbo.mem.mm_node;
1438
1439 while (num_pages) {
1440 uint32_t byte_count = mm_node->size << PAGE_SHIFT;
1441 uint64_t dst_addr;
1442
1443 r = amdgpu_mm_node_addr(&bo->tbo, mm_node,
1444 &bo->tbo.mem, &dst_addr);
1445 if (r)
1446 return r;
1447
1448 while (byte_count) {
1449 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
1450
1451 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
1452 dst_addr, cur_size_in_bytes);
1453
1454 dst_addr += cur_size_in_bytes;
1455 byte_count -= cur_size_in_bytes;
1456 }
1457
1458 num_pages -= mm_node->size;
1459 ++mm_node;
1460 }
1461
1462 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1463 WARN_ON(job->ibs[0].length_dw > num_dw);
1464 r = amdgpu_job_submit(job, ring, &adev->mman.entity,
1465 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
1466 if (r)
1467 goto error_free;
1468
1469 return 0;
1470
1471error_free:
1472 amdgpu_job_free(job);
1473 return r;
1474}
1475
1476#if defined(CONFIG_DEBUG_FS)
1477
1478static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
1479{
1480 struct drm_info_node *node = (struct drm_info_node *)m->private;
1481 unsigned ttm_pl = *(int *)node->info_ent->data;
1482 struct drm_device *dev = node->minor->dev;
1483 struct amdgpu_device *adev = dev->dev_private;
1484 struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv;
1485 int ret;
1486 struct ttm_bo_global *glob = adev->mman.bdev.glob;
1487
1488 spin_lock(&glob->lru_lock);
1489 ret = drm_mm_dump_table(m, mm);
1490 spin_unlock(&glob->lru_lock);
1491 if (ttm_pl == TTM_PL_VRAM)
1492 seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
1493 adev->mman.bdev.man[ttm_pl].size,
1494 (u64)atomic64_read(&adev->vram_usage) >> 20,
1495 (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
1496 return ret;
1497}
1498
1499static int ttm_pl_vram = TTM_PL_VRAM;
1500static int ttm_pl_tt = TTM_PL_TT;
1501
1502static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
1503 {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
1504 {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
1505 {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
1506#ifdef CONFIG_SWIOTLB
1507 {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
1508#endif
1509};
1510
1511static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
1512 size_t size, loff_t *pos)
1513{
1514 struct amdgpu_device *adev = file_inode(f)->i_private;
1515 ssize_t result = 0;
1516 int r;
1517
1518 if (size & 0x3 || *pos & 0x3)
1519 return -EINVAL;
1520
1521 while (size) {
1522 unsigned long flags;
1523 uint32_t value;
1524
1525 if (*pos >= adev->mc.mc_vram_size)
1526 return result;
1527
1528 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1529 WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
1530 WREG32(mmMM_INDEX_HI, *pos >> 31);
1531 value = RREG32(mmMM_DATA);
1532 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1533
1534 r = put_user(value, (uint32_t *)buf);
1535 if (r)
1536 return r;
1537
1538 result += 4;
1539 buf += 4;
1540 *pos += 4;
1541 size -= 4;
1542 }
1543
1544 return result;
1545}
1546
1547static const struct file_operations amdgpu_ttm_vram_fops = {
1548 .owner = THIS_MODULE,
1549 .read = amdgpu_ttm_vram_read,
1550 .llseek = default_llseek
1551};
1552
1553#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
1554
1555static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
1556 size_t size, loff_t *pos)
1557{
1558 struct amdgpu_device *adev = file_inode(f)->i_private;
1559 ssize_t result = 0;
1560 int r;
1561
1562 while (size) {
1563 loff_t p = *pos / PAGE_SIZE;
1564 unsigned off = *pos & ~PAGE_MASK;
1565 size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
1566 struct page *page;
1567 void *ptr;
1568
1569 if (p >= adev->gart.num_cpu_pages)
1570 return result;
1571
1572 page = adev->gart.pages[p];
1573 if (page) {
1574 ptr = kmap(page);
1575 ptr += off;
1576
1577 r = copy_to_user(buf, ptr, cur_size);
1578 kunmap(adev->gart.pages[p]);
1579 } else
1580 r = clear_user(buf, cur_size);
1581
1582 if (r)
1583 return -EFAULT;
1584
1585 result += cur_size;
1586 buf += cur_size;
1587 *pos += cur_size;
1588 size -= cur_size;
1589 }
1590
1591 return result;
1592}
1593
1594static const struct file_operations amdgpu_ttm_gtt_fops = {
1595 .owner = THIS_MODULE,
1596 .read = amdgpu_ttm_gtt_read,
1597 .llseek = default_llseek
1598};
1599
1600#endif
1601
1602#endif
1603
1604static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
1605{
1606#if defined(CONFIG_DEBUG_FS)
1607 unsigned count;
1608
1609 struct drm_minor *minor = adev->ddev->primary;
1610 struct dentry *ent, *root = minor->debugfs_root;
1611
1612 ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root,
1613 adev, &amdgpu_ttm_vram_fops);
1614 if (IS_ERR(ent))
1615 return PTR_ERR(ent);
1616 i_size_write(ent->d_inode, adev->mc.mc_vram_size);
1617 adev->mman.vram = ent;
1618
1619#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
1620 ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
1621 adev, &amdgpu_ttm_gtt_fops);
1622 if (IS_ERR(ent))
1623 return PTR_ERR(ent);
1624 i_size_write(ent->d_inode, adev->mc.gtt_size);
1625 adev->mman.gtt = ent;
1626
1627#endif
1628 count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
1629
1630#ifdef CONFIG_SWIOTLB
1631 if (!swiotlb_nr_tbl())
1632 --count;
1633#endif
1634
1635 return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
1636#else
1637
1638 return 0;
1639#endif
1640}
1641
1642static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
1643{
1644#if defined(CONFIG_DEBUG_FS)
1645
1646 debugfs_remove(adev->mman.vram);
1647 adev->mman.vram = NULL;
1648
1649#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
1650 debugfs_remove(adev->mman.gtt);
1651 adev->mman.gtt = NULL;
1652#endif
1653
1654#endif
1655}