/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>

#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The driver
 * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
 * to create/destroy/set buffer objects, which are then managed by the kernel
 * TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 *
 */

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	amdgpu_bo_kunmap(bo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	amdgpu_bo_unref(&bo->parent);
	kvfree(bo);
}

static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
	struct amdgpu_bo_user *ubo;

	ubo = to_amdgpu_bo_user(bo);
	kfree(ubo->metadata);
	amdgpu_bo_destroy(tbo);
}

static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
	struct amdgpu_bo_vm *vmbo;

	vmbo = to_amdgpu_bo_vm(bo);
	/* in case amdgpu_device_recover_vram got NULL for bo->parent */
	if (!list_empty(&vmbo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&vmbo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	amdgpu_bo_destroy(tbo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is an &amdgpu_bo.
 *
 * Returns:
 * true if the object is an &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy ||
	    bo->destroy == &amdgpu_bo_user_destroy ||
	    bo->destroy == &amdgpu_bo_vm_destroy)
		return true;

	return false;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_VRAM;
		places[c].flags = 0;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type =
			abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
			AMDGPU_PL_PREEMPT : TTM_PL_TT;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GDS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GWS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_OA;
		places[c].flags = 0;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}

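/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * normally refreshes the placement right before validating the BO into
 * a new domain. The BO must be reserved; "bo" stands for an assumed,
 * already created &amdgpu_bo.
 *
 *	struct ttm_operation_ctx ctx = { true, false };
 *	int r;
 *
 *	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 *	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 */
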
/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is created only if *bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is created only if *bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}

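/*
 * Usage sketch (illustrative only, not part of the driver): allocating
 * one page of pinned, CPU-visible VRAM for kernel use and freeing it
 * again; "adev" stands for an assumed valid device pointer.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	... use gpu_addr / cpu_ptr ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */
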
/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in VRAM.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
				      cpu_addr);
	if (r)
		return r;

	if ((*bo_ptr) == NULL)
		return 0;

	/*
	 * Remove the original mem node and create a new one at the requested
	 * position.
	 */
	if (cpu_addr)
		amdgpu_bo_kunmap(*bo_ptr);

	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);

	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.resource, &ctx);
	if (r)
		goto error;

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

442
443/* Validate bo size is bit bigger then the request domain */
444static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
445 unsigned long size, u32 domain)
446{
447 struct ttm_resource_manager *man = NULL;
448
449 /*
450 * If GTT is part of requested domains the check must succeed to
451 * allow fall back to GTT.
452 */
453 if (domain & AMDGPU_GEM_DOMAIN_GTT) {
454 man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
455
456 if (man && size < man->size)
457 return true;
458 else if (!man)
459 WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
460 goto fail;
461 } else if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
462 man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
463
464 if (man && size < man->size)
465 return true;
466 goto fail;
467 }
468
469 /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
470 return true;
471
472fail:
473 if (man)
474 DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
475 man->size);
476 return false;
477}
478
bool amdgpu_bo_support_uswc(u64 bo_flags)
{

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
		/* We opt to avoid OOM on system page allocations */
		.gfp_retry_mayfail = true,
		.allow_res_evict = bp->type != ttm_bo_type_kernel,
		.resv = bp->resv
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;

	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));

	*bo_ptr = NULL;
	bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	if (adev->ras_enabled)
		bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	if (!bp->destroy)
		bp->destroy = &amdgpu_bo_destroy;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
				 &bo->placement, page_align, &ctx, NULL,
				 bp->resv, bp->destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.resource->mem_type == TTM_PL_VRAM &&
	    bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		dma_resv_add_fence(bo->tbo.base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}

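/*
 * Usage sketch (illustrative only, not part of the driver): creating a
 * GTT BO with an explicitly filled &amdgpu_bo_param, mirroring what
 * amdgpu_bo_create_reserved() does internally; "adev" is assumed valid.
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */
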
/**
 * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @ubo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by a user application.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */

int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
	bp->destroy = &amdgpu_bo_user_destroy;
	r = amdgpu_bo_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @vmbo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by GPUVM.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */

int amdgpu_bo_create_vm(struct amdgpu_device *adev,
			struct amdgpu_bo_param *bp,
			struct amdgpu_bo_vm **vmbo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	/* bo_ptr_size will be determined by the caller and it depends on
	 * num of amdgpu_vm_pt entries.
	 */
	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
	r = amdgpu_bo_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
	INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
	/* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
	 * is initialized.
	 */
	bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
	return r;
}

/**
 * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
 *
 * @vmbo: BO that will be inserted into the shadow list
 *
 * Inserts a BO into the shadow list.
 */
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev);

	mutex_lock(&adev->shadow_list_lock);
	list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
	mutex_unlock(&adev->shadow_list_lock);
}

/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 *
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a gpu
 * reset where vram context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false, false);
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
				  false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

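/*
 * Usage sketch (illustrative only, not part of the driver): CPU access
 * to a BO's backing store. The BO must be reserved and must not have
 * been created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS; "bo" is assumed
 * valid.
 *
 *	void *ptr;
 *	int r;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (r)
 *		return r;
 *	memset(ptr, 0, amdgpu_bo_size(bo));
 *	amdgpu_bo_kunmap(bo);
 */
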
/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound gart memory, binds the pages into gart table. Adjusts
 * pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer cannot be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* Check domain to be pinned to against preferred domains */
	if (bo->preferred_domains & domain)
		domain = bo->preferred_domains & domain;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->tbo.base.import_attach) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	if (bo->tbo.pin_count) {
		uint32_t mem_type = bo->tbo.resource->mem_type;
		uint32_t mem_flags = bo->tbo.resource->placement;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		if ((mem_type == TTM_PL_VRAM) &&
		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
			return -EINVAL;

		ttm_bo_pin(&bo->tbo);

		if (max_offset != 0) {
			u64 domain_start = amdgpu_ttm_domain_start(adev,
								   mem_type);
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_domain(adev, domain);

	if (bo->tbo.base.import_attach)
		dma_buf_pin(bo->tbo.base.import_attach);

	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	ttm_bo_pin(&bo->tbo);

	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}

/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}

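/*
 * Usage sketch (illustrative only, not part of the driver): pinning a
 * BO, e.g. for scanout. Pin/unpin must happen with the BO reserved;
 * "bo" is assumed to be a valid, unpinned &amdgpu_bo.
 *
 *	u64 addr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	if (!r)
 *		addr = amdgpu_bo_gpu_offset(bo);
 *	amdgpu_bo_unreserve(bo);
 */
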
/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 */
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	ttm_bo_unpin(&bo->tbo);
	if (bo->tbo.pin_count)
		return;

	if (bo->tbo.base.import_attach)
		dma_buf_unpin(bo->tbo.base.import_attach);

	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
	"DDR5",
	"LPDDR4",
	"LPDDR5"
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* On A+A platform, VRAM can be mapped as WB */
	if (!adev->gmc.xgmi.connected_to_cpu) {
		/* reserve PAT memory space to WC for VRAM */
		int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
						   adev->gmc.aper_size);

		if (r) {
			DRM_ERROR("Unable to set WC memtype for the aperture base\n");
			return r;
		}

		/* Add an MTRR for the VRAM */
		adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
						       adev->gmc.aper_size);
	}

	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	int idx;

	amdgpu_ttm_fini(adev);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {

		if (!adev->gmc.xgmi.connected_to_cpu) {
			arch_phys_wc_del(adev->gmc.vram_mtrr);
			arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
		}
		drm_dev_exit(idx);
	}
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets the buffer object's tiling flags. Used by GEM ioctl or
 * kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	ubo = to_amdgpu_bo_user(bo);
	ubo->tiling_flags = tiling_flags;
	return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
 * get the tiling flags on a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	dma_resv_assert_held(bo->tbo.base.resv);
	ubo = to_amdgpu_bo_user(bo);

	if (tiling_flags)
		*tiling_flags = ubo->tiling_flags;
}

/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	struct amdgpu_bo_user *ubo;
	void *buffer;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (!metadata_size) {
		if (ubo->metadata_size) {
			kfree(ubo->metadata);
			ubo->metadata = NULL;
			ubo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(ubo->metadata);
	ubo->metadata_flags = flags;
	ubo->metadata = buffer;
	ubo->metadata_size = metadata_size;

	return 0;
}

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	struct amdgpu_bo_user *ubo;

	if (!buffer && !metadata_size)
		return -EINVAL;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (metadata_size)
		*metadata_size = ubo->metadata_size;

	if (buffer) {
		if (buffer_size < ubo->metadata_size)
			return -EINVAL;

		if (ubo->metadata_size)
			memcpy(buffer, ubo->metadata, ubo->metadata_size);
	}

	if (flags)
		*flags = ubo->metadata_flags;

	return 0;
}

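/*
 * Usage sketch (illustrative only, not part of the driver): attaching
 * opaque metadata to a user BO and reading it back; "bo" is assumed to
 * be a valid user BO created via amdgpu_bo_create_user().
 *
 *	u8 blob[8] = { 0 };
 *	u32 size;
 *	u64 flags;
 *	int r;
 *
 *	r = amdgpu_bo_set_metadata(bo, blob, sizeof(blob), 0);
 *	if (!r)
 *		r = amdgpu_bo_get_metadata(bo, blob, sizeof(blob),
 *					   &size, &flags);
 */
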
/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_resource *old_mem = bo->resource;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
	    bo->resource->mem_type != TTM_PL_SYSTEM)
		dma_buf_move_notify(abo->tbo.base.dma_buf);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
			  uint64_t *gtt_mem, uint64_t *cpu_mem)
{
	unsigned int domain;

	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		*vram_mem += amdgpu_bo_size(bo);
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		*gtt_mem += amdgpu_bo_size(bo);
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		*cpu_mem += amdgpu_bo_size(bo);
		break;
	}
}

/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not be leaked before the
 * memory is released.
 */
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *abo;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);

	if (abo->kfd_bo)
		amdgpu_amdkfd_release_notify(abo);

	/* We only remove the fence if the resv has individualized. */
	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
			&& bo->base.resv != &bo->base._resv);
	if (bo->base.resv == &bo->base._resv)
		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);

	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
	    adev->in_suspend || adev->shutdown)
		return;

	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
		return;

	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
	if (!WARN_ON(r)) {
		amdgpu_bo_fence(abo, fence, false);
		dma_fence_put(fence);
	}

	dma_resv_unlock(bo->base.resv);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a VM_FAULT_* code on failure.
 */
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	unsigned long offset;
	int r;

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->resource->mem_type != TTM_PL_VRAM)
		return 0;

	offset = bo->resource->start << PAGE_SHIFT;
	if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->tbo.pin_count > 0)
		return VM_FAULT_SIGBUS;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(r))
		return VM_FAULT_SIGBUS;

	offset = bo->resource->start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    (offset + bo->base.size) > adev->gmc.visible_vram_size)
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	int r;

	r = dma_resv_reserve_fences(resv, 1);
	if (r) {
		/* As last resort on OOM we block for the fence */
		dma_fence_wait(fence, false);
		return;
	}

	dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
			   DMA_RESV_USAGE_WRITE);
}

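/*
 * Usage sketch (illustrative only, not part of the driver): attaching a
 * fence to a BO so that later users wait for it. The BO must be
 * reserved; "bo" and "fence" are assumed valid. Passing shared=true
 * adds the fence with read usage, shared=false with write usage.
 *
 *	amdgpu_bo_fence(bo, fence, true);
 */
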
/**
 * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
 *
 * @adev: amdgpu device pointer
 * @resv: reservation object to sync to
 * @sync_mode: synchronization mode
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Extracts the fences from the reservation object and waits for them to
 * finish.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return r;
}

/**
 * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
 * @bo: buffer object to wait for
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Wrapper to wait for fences in a BO.
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
					AMDGPU_SYNC_NE_OWNER, owner, intr);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Note: the object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_bo_gpu_offset_no_check(bo);
}

/**
 * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns:
 * current GPU offset of the object without raising warnings.
 */
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t offset;

	offset = (bo->tbo.resource->start << PAGE_SHIFT) +
		 amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);

	return amdgpu_gmc_sign_extend(offset);
}

/**
 * amdgpu_bo_get_preferred_domain - get preferred domain
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for allocating the BO.
 */
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
					uint32_t domain)
{
	if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
	    ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}

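/*
 * Usage sketch (illustrative only, not part of the driver): resolving a
 * VRAM|GTT request to the domain this ASIC actually prefers before
 * pinning a display buffer; "adev" and "bo" are assumed valid.
 *
 *	u32 domain = amdgpu_bo_get_preferred_domain(adev,
 *			AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
 *	r = amdgpu_bo_pin(bo, domain);
 */
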
#if defined(CONFIG_DEBUG_FS)
#define amdgpu_bo_print_flag(m, bo, flag)			\
	do {							\
		if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
			seq_printf((m), " " #flag);		\
		}						\
	} while (0)

/**
 * amdgpu_bo_print_info - print BO info in debugfs file
 *
 * @id: Index or Id of the BO
 * @bo: Requested BO for printing info
 * @m: debugfs file
 *
 * Print BO information in debugfs file
 *
 * Returns:
 * Size of the BO in bytes.
 */
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	unsigned int domain;
	const char *placement;
	unsigned int pin_count;
	u64 size;

	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}

	size = amdgpu_bo_size(bo);
	seq_printf(m, "\t\t0x%08x: %12lld byte %s",
		   id, size, placement);

	pin_count = READ_ONCE(bo->tbo.pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
	attachment = READ_ONCE(bo->tbo.base.import_attach);

	if (attachment)
		seq_printf(m, " imported from %p", dma_buf);
	else if (dma_buf)
		seq_printf(m, " exported as %p", dma_buf);

	amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_puts(m, "\n");

	return size;
}
#endif
1/*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30 * Dave Airlie
31 */
32#include <linux/list.h>
33#include <linux/slab.h>
34#include <drm/drmP.h>
35#include <drm/amdgpu_drm.h>
36#include <drm/drm_cache.h>
37#include "amdgpu.h"
38#include "amdgpu_trace.h"
39
40
41int amdgpu_ttm_init(struct amdgpu_device *adev);
42void amdgpu_ttm_fini(struct amdgpu_device *adev);
43
44static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
45 struct ttm_mem_reg *mem)
46{
47 u64 ret = 0;
48 if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
49 ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
50 adev->mc.visible_vram_size ?
51 adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
52 mem->size;
53 }
54 return ret;
55}
56
57static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
58 struct ttm_mem_reg *old_mem,
59 struct ttm_mem_reg *new_mem)
60{
61 u64 vis_size;
62 if (!adev)
63 return;
64
65 if (new_mem) {
66 switch (new_mem->mem_type) {
67 case TTM_PL_TT:
68 atomic64_add(new_mem->size, &adev->gtt_usage);
69 break;
70 case TTM_PL_VRAM:
71 atomic64_add(new_mem->size, &adev->vram_usage);
72 vis_size = amdgpu_get_vis_part_size(adev, new_mem);
73 atomic64_add(vis_size, &adev->vram_vis_usage);
74 break;
75 }
76 }
77
78 if (old_mem) {
79 switch (old_mem->mem_type) {
80 case TTM_PL_TT:
81 atomic64_sub(old_mem->size, &adev->gtt_usage);
82 break;
83 case TTM_PL_VRAM:
84 atomic64_sub(old_mem->size, &adev->vram_usage);
85 vis_size = amdgpu_get_vis_part_size(adev, old_mem);
86 atomic64_sub(vis_size, &adev->vram_vis_usage);
87 break;
88 }
89 }
90}
91
92static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
93{
94 struct amdgpu_bo *bo;
95
96 bo = container_of(tbo, struct amdgpu_bo, tbo);
97
98 amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
99
100 drm_gem_object_release(&bo->gem_base);
101 amdgpu_bo_unref(&bo->parent);
102 kfree(bo->metadata);
103 kfree(bo);
104}
105
106bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
107{
108 if (bo->destroy == &amdgpu_ttm_bo_destroy)
109 return true;
110 return false;
111}
112
113static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
114 struct ttm_placement *placement,
115 struct ttm_place *placements,
116 u32 domain, u64 flags)
117{
118 u32 c = 0, i;
119
120 placement->placement = placements;
121 placement->busy_placement = placements;
122
123 if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
124 if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
125 adev->mc.visible_vram_size < adev->mc.real_vram_size) {
126 placements[c].fpfn =
127 adev->mc.visible_vram_size >> PAGE_SHIFT;
128 placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
129 TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
130 }
131 placements[c].fpfn = 0;
132 placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
133 TTM_PL_FLAG_VRAM;
134 if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
135 placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
136 }
137
138 if (domain & AMDGPU_GEM_DOMAIN_GTT) {
139 if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
140 placements[c].fpfn = 0;
141 placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
142 TTM_PL_FLAG_UNCACHED;
143 } else {
144 placements[c].fpfn = 0;
145 placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
146 }
147 }
148
149 if (domain & AMDGPU_GEM_DOMAIN_CPU) {
150 if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
151 placements[c].fpfn = 0;
152 placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
153 TTM_PL_FLAG_UNCACHED;
154 } else {
155 placements[c].fpfn = 0;
156 placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
157 }
158 }
159
160 if (domain & AMDGPU_GEM_DOMAIN_GDS) {
161 placements[c].fpfn = 0;
162 placements[c++].flags = TTM_PL_FLAG_UNCACHED |
163 AMDGPU_PL_FLAG_GDS;
164 }
165 if (domain & AMDGPU_GEM_DOMAIN_GWS) {
166 placements[c].fpfn = 0;
167 placements[c++].flags = TTM_PL_FLAG_UNCACHED |
168 AMDGPU_PL_FLAG_GWS;
169 }
170 if (domain & AMDGPU_GEM_DOMAIN_OA) {
171 placements[c].fpfn = 0;
172 placements[c++].flags = TTM_PL_FLAG_UNCACHED |
173 AMDGPU_PL_FLAG_OA;
174 }
175
176 if (!c) {
177 placements[c].fpfn = 0;
178 placements[c++].flags = TTM_PL_MASK_CACHING |
179 TTM_PL_FLAG_SYSTEM;
180 }
181 placement->num_placement = c;
182 placement->num_busy_placement = c;
183
184 for (i = 0; i < c; i++) {
185 if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
186 (placements[i].flags & TTM_PL_FLAG_VRAM) &&
187 !placements[i].fpfn)
188 placements[i].lpfn =
189 adev->mc.visible_vram_size >> PAGE_SHIFT;
190 else
191 placements[i].lpfn = 0;
192 }
193}
194
195void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
196{
197 amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
198 rbo->placements, domain, rbo->flags);
199}
200
201static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
202 struct ttm_placement *placement)
203{
204 BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));
205
206 memcpy(bo->placements, placement->placement,
207 placement->num_placement * sizeof(struct ttm_place));
208 bo->placement.num_placement = placement->num_placement;
209 bo->placement.num_busy_placement = placement->num_busy_placement;
210 bo->placement.placement = bo->placements;
211 bo->placement.busy_placement = bo->placements;
212}
213
214int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
215 unsigned long size, int byte_align,
216 bool kernel, u32 domain, u64 flags,
217 struct sg_table *sg,
218 struct ttm_placement *placement,
219 struct reservation_object *resv,
220 struct amdgpu_bo **bo_ptr)
221{
222 struct amdgpu_bo *bo;
223 enum ttm_bo_type type;
224 unsigned long page_align;
225 size_t acc_size;
226 int r;
227
228 page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
229 size = ALIGN(size, PAGE_SIZE);
230
231 if (kernel) {
232 type = ttm_bo_type_kernel;
233 } else if (sg) {
234 type = ttm_bo_type_sg;
235 } else {
236 type = ttm_bo_type_device;
237 }
238 *bo_ptr = NULL;
239
240 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
241 sizeof(struct amdgpu_bo));
242
243 bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
244 if (bo == NULL)
245 return -ENOMEM;
246 r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
247 if (unlikely(r)) {
248 kfree(bo);
249 return r;
250 }
251 bo->adev = adev;
252 INIT_LIST_HEAD(&bo->list);
253 INIT_LIST_HEAD(&bo->va);
254 bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
255 AMDGPU_GEM_DOMAIN_GTT |
256 AMDGPU_GEM_DOMAIN_CPU |
257 AMDGPU_GEM_DOMAIN_GDS |
258 AMDGPU_GEM_DOMAIN_GWS |
259 AMDGPU_GEM_DOMAIN_OA);
260 bo->allowed_domains = bo->prefered_domains;
261 if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
262 bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
263
264 bo->flags = flags;
265
266 /* For architectures that don't support WC memory,
267 * mask out the WC flag from the BO
268 */
269 if (!drm_arch_can_wc_memory())
270 bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
271
272 amdgpu_fill_placement_to_bo(bo, placement);
273 /* Kernel allocation are uninterruptible */
274 r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
275 &bo->placement, page_align, !kernel, NULL,
276 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
277 if (unlikely(r != 0)) {
278 return r;
279 }
280 *bo_ptr = bo;
281
282 trace_amdgpu_bo_create(bo);
283
284 return 0;
285}
286
287int amdgpu_bo_create(struct amdgpu_device *adev,
288 unsigned long size, int byte_align,
289 bool kernel, u32 domain, u64 flags,
290 struct sg_table *sg,
291 struct reservation_object *resv,
292 struct amdgpu_bo **bo_ptr)
293{
294 struct ttm_placement placement = {0};
295 struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
296
297 memset(&placements, 0,
298 (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
299
300 amdgpu_ttm_placement_init(adev, &placement,
301 placements, domain, flags);
302
303 return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
304 domain, flags, sg, &placement,
305 resv, bo_ptr);
306}
307
308int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
309{
310 bool is_iomem;
311 long r;
312
313 if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
314 return -EPERM;
315
316 if (bo->kptr) {
317 if (ptr) {
318 *ptr = bo->kptr;
319 }
320 return 0;
321 }
322
323 r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
324 MAX_SCHEDULE_TIMEOUT);
325 if (r < 0)
326 return r;
327
328 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
329 if (r)
330 return r;
331
332 bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
333 if (ptr)
334 *ptr = bo->kptr;
335
336 return 0;
337}
338
339void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
340{
341 if (bo->kptr == NULL)
342 return;
343 bo->kptr = NULL;
344 ttm_bo_kunmap(&bo->kmap);
345}
346
347struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
348{
349 if (bo == NULL)
350 return NULL;
351
352 ttm_bo_reference(&bo->tbo);
353 return bo;
354}
355
356void amdgpu_bo_unref(struct amdgpu_bo **bo)
357{
358 struct ttm_buffer_object *tbo;
359
360 if ((*bo) == NULL)
361 return;
362
363 tbo = &((*bo)->tbo);
364 ttm_bo_unref(&tbo);
365 if (tbo == NULL)
366 *bo = NULL;
367}
368
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;
	unsigned fpfn, lpfn;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == AMDGPU_GEM_DOMAIN_VRAM)
				domain_start = bo->adev->mc.vram_start;
			else
				domain_start = bo->adev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* Force the BO to be pinned into CPU-visible VRAM */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
			if (WARN_ON_ONCE(min_offset >
					 bo->adev->mc.visible_vram_size))
				return -EINVAL;
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
		} else {
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = max_offset >> PAGE_SHIFT;
		}
		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);
		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
			bo->adev->vram_pin_size += amdgpu_bo_size(bo);
			if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
				bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
		} else
			bo->adev->gart_pin_size += amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p pin failed\n", bo);
	}
	return r;
}

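/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain the BO should be pinned to
 * @gpu_addr: optionally returned GPU address of the pinned BO
 *
 * Pins the BO anywhere within @domain via amdgpu_bo_pin_restricted().
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */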
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

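/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin count; when it reaches zero, drops the NO_EVICT
 * placement flags again and revalidates the BO so it can be evicted.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */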
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
			bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
			if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
				bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
		} else
			bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

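/**
 * amdgpu_bo_evict_vram - evict all VRAM buffers on this device
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the device; typically used before suspend so
 * buffer contents survive while VRAM is powered down.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */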
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix for IGP hibernation - we need pm ops to do
	 * this correctly
	 */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3"
};

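/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Adds a write-combining MTRR for the VRAM aperture and initializes the
 * TTM memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */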
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->mc.mc_vram_size >> 20,
		 (unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
	return amdgpu_ttm_init(adev);
}

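/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down the TTM memory manager and the
 * VRAM MTRR.
 */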
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
}

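/**
 * amdgpu_bo_fbdev_mmap - mmap fbdev memory
 * @bo: &amdgpu_bo buffer object backing the fbdev
 * @vma: vma as input from the fbdev mmap method
 *
 * Calls ttm_fbdev_mmap() to map the BO into the given vma.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */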
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

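/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets the buffer object's tiling flags. Used by GEM ioctl or kernel driver
 * to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */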
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

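/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets the buffer object's tiling flags. The BO's reservation object must
 * be held by the caller.
 */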
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

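/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata; zero frees the existing metadata
 * @flags: flags of the new metadata
 *
 * Sets the buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */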
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

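/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata, may be NULL
 * @buffer_size: size of the buffer; must not be smaller than
 *		 bo->metadata_size when @buffer is given
 * @metadata_size: returned size of the metadata, may be NULL
 * @flags: returned flags of the metadata, may be NULL
 *
 * Gets the buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */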
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

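/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to the buffer object being moved
 * @new_mem: new placement of the buffer object, NULL on teardown
 *
 * TTM driver callback which is called when TTM moves a buffer. Marks the
 * corresponding &amdgpu_bo as invalid in its VMs and updates the memory
 * usage statistics.
 */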
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_bo *rbo;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	rbo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(rbo->adev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
}

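/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to the faulting buffer object
 *
 * TTM driver callback for dealing with VM faults. If the BO was faulted on
 * while lying outside of CPU-visible VRAM, tries to move it into the
 * visible part, falling back to GTT if that fails.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */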
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	adev = abo->adev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* The BO is not CPU-visible; restrict it to visible VRAM */
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
	lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < abo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
			abo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &abo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &abo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > adev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 * Attaches @fence to the reservation object of @bo, either as a shared
 * or as an exclusive fence.
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}