// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt

static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
	return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}

static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
		     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned int num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t pte_flags;
	void *cpu_addr;
	int r;

	/* use gart window 0 */
	*gart_addr = adev->gmc.gart_start;

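	/* The IB holds the SDMA copy packet (num_dw dwords) followed by the
	 * GART PTEs for the system pages (num_bytes). The copy packet moves
	 * those PTEs from the IB into the GART table, so the pages become
	 * accessible through gart window 0.
	 */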
	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = npages * 8;

	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED,
				     &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
		pte_flags |= AMDGPU_PTE_WRITEABLE;
	pte_flags |= adev->gart.gart_pte_flags;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
	fence = amdgpu_job_submit(job);
	dma_fence_put(fence);

	return r;
}

/**
 * svm_migrate_copy_memory_gart - sdma copy data between ram and vram
 *
 * @adev: amdgpu device the sdma ring is running on
 * @sys: DMA addresses of the system memory pages
 * @vram: physical addresses of the vram pages
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR
 * @mfence: output, sdma fence to signal after sdma is done
 *
 * The ram side is addressed through contiguous GART table entries that map
 * the ram pages; the vram side uses the direct mapping of the vram pages,
 * which must be npages contiguous pages.
 * GART updates and sdma copies share the same buffer-copy ring. The copy is
 * split into chunks of at most GTT_MAX_PAGES pages; all sdma operations are
 * serialized, so waiting for the returned fence of the last chunk is enough
 * to know the whole copy is done.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
			     uint64_t *vram, uint64_t npages,
			     enum MIGRATION_COPY_DIR direction,
			     struct dma_fence **mfence)
{
	const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t gart_s, gart_d;
	struct dma_fence *next;
	uint64_t size;
	int r;

	mutex_lock(&adev->mman.gtt_window_lock);

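	/* Copy in chunks of at most GTT_MAX_PAGES pages: map the system pages
	 * through the GART window, use the direct VRAM mapping for the other
	 * side, then queue an SDMA copy. Only the fence of the last chunk is
	 * kept in *mfence.
	 */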
	while (npages) {
		size = min(GTT_MAX_PAGES, npages);

		if (direction == FROM_VRAM_TO_RAM) {
			gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
			r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);

		} else if (direction == FROM_RAM_TO_VRAM) {
			r = svm_migrate_gart_map(ring, size, sys, &gart_s,
						 KFD_IOCTL_SVM_FLAG_GPU_RO);
			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
		}
		if (r) {
			dev_err(adev->dev, "fail %d create gart mapping\n", r);
			goto out_unlock;
		}

		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
				       NULL, &next, false, true, false);
		if (r) {
			dev_err(adev->dev, "fail %d to copy memory\n", r);
			goto out_unlock;
		}

		dma_fence_put(*mfence);
		*mfence = next;
		npages -= size;
		if (npages) {
			sys += size;
			vram += size;
		}
	}

out_unlock:
	mutex_unlock(&adev->mman.gtt_window_lock);

	return r;
}

/**
 * svm_migrate_copy_done - wait for the sdma memory copy to finish
 *
 * @adev: amdgpu device the sdma memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the dma fence to be signaled. If the copy was split into multiple
 * sdma operations, this is the fence of the last one.
 *
 * Context: called after svm_migrate_copy_memory
 *
 * Return:
 * 0 - success
 * otherwise - error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);
		dma_fence_put(mfence);
		pr_debug("sdma copy memory fence done\n");
	}

	return r;
}

unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
	return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;
}

static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	svm_range_bo_ref(prange->svm_bo);
	page->zone_device_data = prange->svm_bo;
	zone_device_page_init(page);
}

static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
	unlock_page(page);
	put_page(page);
}

static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
	unsigned long addr;

	addr = page_to_pfn(page) << PAGE_SHIFT;
	return (addr - adev->kfd.dev->pgmap.range.start);
}

static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (page)
		lock_page(page);

	return page;
}

static void svm_migrate_put_sys_page(unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(addr >> PAGE_SHIFT);
	unlock_page(page);
	put_page(page);
}

static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
	unsigned long cpages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    migrate->src[i] & MIGRATE_PFN_MIGRATE)
			cpages++;
	}
	return cpages;
}

static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
	unsigned long upages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
			upages++;
	}
	return upages;
}

static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			 struct migrate_vma *migrate, struct dma_fence **mfence,
			 dma_addr_t *scratch)
{
	uint64_t npages = migrate->npages;
	struct device *dev = adev->dev;
	struct amdgpu_res_cursor cursor;
	dma_addr_t *src;
	uint64_t *dst;
	uint64_t i, j;
	int r;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

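	/* scratch buffer layout: the first npages entries hold the DMA
	 * addresses of the system pages (src), the following npages entries
	 * hold the VRAM physical offsets (dst).
	 */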
	src = scratch;
	dst = (uint64_t *)(scratch + npages);

	r = svm_range_vram_node_new(adev, prange, true);
	if (r) {
		dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
		goto out;
	}

	amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
			 npages << PAGE_SHIFT, &cursor);
	for (i = j = 0; i < npages; i++) {
		struct page *spage;

		dst[i] = cursor.start + (j << PAGE_SHIFT);
		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
		svm_migrate_get_vram_page(prange, migrate->dst[i]);
		migrate->dst[i] = migrate_pfn(migrate->dst[i]);

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (spage && !is_zone_device_page(spage)) {
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_TO_DEVICE);
			r = dma_mapping_error(dev, src[i]);
			if (r) {
				dev_err(adev->dev, "%s: fail %d dma_map_page\n",
					__func__, r);
				goto out_free_vram_pages;
			}
		} else {
			if (j) {
				r = svm_migrate_copy_memory_gart(
						adev, src + i - j,
						dst + i - j, j,
						FROM_RAM_TO_VRAM,
						mfence);
				if (r)
					goto out_free_vram_pages;
				amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
				j = 0;
			} else {
				amdgpu_res_next(&cursor, PAGE_SIZE);
			}
			continue;
		}

		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

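		/* When the contiguous VRAM segment under the cursor is used
		 * up, flush the accumulated copy and move the cursor to the
		 * next segment.
		 */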
		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
			r = svm_migrate_copy_memory_gart(adev, src + i - j,
							 dst + i - j, j + 1,
							 FROM_RAM_TO_VRAM,
							 mfence);
			if (r)
				goto out_free_vram_pages;
			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
			j = 0;
		} else {
			j++;
		}
	}

	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
					 FROM_RAM_TO_VRAM, mfence);

out_free_vram_pages:
	if (r) {
		pr_debug("failed %d to copy memory to vram\n", r);
		while (i--) {
			svm_migrate_put_vram_page(adev, dst[i]);
			migrate->dst[i] = 0;
		}
	}

#ifdef DEBUG_FORCE_MIXED_DOMAINS
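	/* Debug-only path: release every other group of four VRAM pages so
	 * the range ends up with mixed VRAM and system memory domains.
	 */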
	for (i = 0, j = 0; i < npages; i += 4, j++) {
		if (j & 1)
			continue;
		svm_migrate_put_vram_page(adev, dst[i]);
		migrate->dst[i] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 1]);
		migrate->dst[i + 1] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 2]);
		migrate->dst[i + 2] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 3]);
		migrate->dst[i + 3] = 0;
	}
#endif
out:
	return r;
}

static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			struct vm_area_struct *vma, uint64_t start,
			uint64_t end, uint32_t trigger)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	unsigned long cpages = 0;
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

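	/* Single allocation backing migrate.src, migrate.dst and the scratch
	 * arrays used for the DMA and VRAM addresses during the copy.
	 */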
	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      0, adev->kfd.dev->id, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
	migrate_vma_pages(&migrate);

	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 svm_migrate_successful_pages(&migrate), cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    0, adev->kfd.dev->id, trigger);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
	svm_range_free_dma_mappings(prange);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);

		return cpages;
	}
	return r;
}

/**
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: the process mm structure
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
			struct mm_struct *mm, uint32_t trigger)
{
	unsigned long addr, start, end;
	struct vm_area_struct *vma;
	struct amdgpu_device *adev;
	unsigned long cpages = 0;
	long r = 0;

	if (prange->actual_loc == best_loc) {
		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
			 prange->svms, prange->start, prange->last, best_loc);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, best_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n", best_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
		 prange->start, prange->last, best_loc);

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = vma_lookup(mm, addr);
		if (!vma)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			cpages += r;
		}
		addr = next;
	}

	if (cpages)
		prange->actual_loc = best_loc;

	return r < 0 ? r : 0;
}

static void svm_migrate_page_free(struct page *page)
{
	struct svm_range_bo *svm_bo = page->zone_device_data;

	if (svm_bo) {
		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref_async(svm_bo);
	}
}

static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch, uint64_t npages)
{
	struct device *dev = adev->dev;
	uint64_t *src;
	dma_addr_t *dst;
	struct page *dpage;
	uint64_t i = 0, j;
	uint64_t addr;
	int r = 0;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	addr = prange->start << PAGE_SHIFT;

	src = (uint64_t *)(scratch + npages);
	dst = scratch;

	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (!spage || !is_zone_device_page(spage)) {
			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			if (j) {
				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
								 src + i - j, j,
								 FROM_VRAM_TO_RAM,
								 mfence);
				if (r)
					goto out_oom;
				j = 0;
			}
			continue;
		}
		src[i] = svm_migrate_addr(adev, spage);
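		/* Flush the accumulated pages when the VRAM source is no
		 * longer contiguous with the previous page.
		 */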
		if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
							 src + i - j, j,
							 FROM_VRAM_TO_RAM,
							 mfence);
			if (r)
				goto out_oom;
			j = 0;
		}

		dpage = svm_migrate_get_sys_page(migrate->vma, addr);
		if (!dpage) {
			pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			r = -ENOMEM;
			goto out_oom;
		}

		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		r = dma_mapping_error(dev, dst[i]);
		if (r) {
			dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
			goto out_oom;
		}

		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		j++;
	}

	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
					 FROM_VRAM_TO_RAM, mfence);

out_oom:
	if (r) {
		pr_debug("failed %d copy to ram\n", r);
		while (i--) {
			svm_migrate_put_sys_page(dst[i]);
			migrate->dst[i] = 0;
		}
	}

	return r;
}

/**
 * svm_migrate_vma_to_ram - migrate range inside one vma from device to system
 *
 * @adev: amdgpu device to migrate from
 * @prange: svm range structure
 * @vma: vm_area_struct that range [start, end] belongs to
 * @start: range start virtual address in bytes
 * @end: range end virtual address in bytes
 * @trigger: reason of migration
 * @fault_page: the page that caused the CPU page fault, or NULL
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - success with all pages migrated
 * negative values - indicate error
 * positive values - partial migration, number of pages not migrated
 */
static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
		       struct vm_area_struct *vma, uint64_t start, uint64_t end,
		       uint32_t trigger, struct page *fault_page)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	unsigned long upages = npages;
	unsigned long cpages = 0;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
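	/* On XGMI-connected (A+A) systems VRAM is exposed as coherent device
	 * memory, otherwise as device-private memory; select the matching
	 * source page type.
	 */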
	if (adev->gmc.xgmi.connected_to_cpu)
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
	else
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	migrate.fault_page = fault_page;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      adev->kfd.dev->id, 0, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		upages = svm_migrate_unsuccessful_pages(&migrate);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
				    scratch, npages);
	migrate_vma_pages(&migrate);

	upages = svm_migrate_unsuccessful_pages(&migrate);
	pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 upages, cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    adev->kfd.dev->id, 0, trigger);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
	}
	return r ? r : upages;
}

/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason of migration
 * @fault_page: the page that caused the CPU page fault, or NULL
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
			    uint32_t trigger, struct page *fault_page)
{
	struct amdgpu_device *adev;
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long start;
	unsigned long end;
	unsigned long upages = 0;
	long r = 0;

	if (!prange->actual_loc) {
		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
			 prange->start, prange->last);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n",
			 prange->actual_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
		 prange->svms, prange, prange->start, prange->last,
		 prange->actual_loc);

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = vma_lookup(mm, addr);
		if (!vma) {
			pr_debug("failed to find vma for prange %p\n", prange);
			r = -EFAULT;
			break;
		}

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger,
					   fault_page);
		if (r < 0) {
			pr_debug("failed %ld to migrate prange %p\n", r, prange);
			break;
		} else {
			upages += r;
		}
		addr = next;
	}

	if (r >= 0 && !upages) {
		svm_range_vram_node_free(prange);
		prange->actual_loc = 0;
	}

	return r < 0 ? r : 0;
}

/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
			 struct mm_struct *mm, uint32_t trigger)
{
	int r, retries = 3;

	/*
	 * TODO: for both devices with PCIe large bar or on same xgmi hive, skip
	 * system memory as migration bridge
	 */

	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

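	/* Step one: move everything back to system memory, retrying up to
	 * three times in case some pages fail to migrate. Step two below
	 * migrates from system memory to the destination VRAM.
	 */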
	do {
		r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
		if (r)
			return r;
	} while (prange->actual_loc && --retries);

	if (prange->actual_loc)
		return -EDEADLK;

	return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
}

int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
		    struct mm_struct *mm, uint32_t trigger)
{
	if (!prange->actual_loc)
		return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
	else
		return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger);
}

/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU vm fault, contains the faulting vma and address
 *
 * Context: vm fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - notify the application of a SIGBUS page fault
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long addr = vmf->address;
	struct svm_range_bo *svm_bo;
	enum svm_work_list_ops op;
	struct svm_range *parent;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r = 0;

	svm_bo = vmf->page->zone_device_data;
	if (!svm_bo) {
		pr_debug("failed get device page at addr 0x%lx\n", addr);
		return VM_FAULT_SIGBUS;
	}
	if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
		pr_debug("addr 0x%lx of process mm is destroyed\n", addr);
		return VM_FAULT_SIGBUS;
	}

	mm = svm_bo->eviction_fence->mm;
	if (mm != vmf->vma->vm_mm)
		pr_debug("addr 0x%lx is COW mapping in child process\n", addr);

	p = kfd_lookup_process_by_mm(mm);
	if (!p) {
		pr_debug("failed find process at fault address 0x%lx\n", addr);
		r = VM_FAULT_SIGBUS;
		goto out_mmput;
	}
	if (READ_ONCE(p->svms.faulting_task) == current) {
		pr_debug("skipping ram migration\n");
		r = 0;
		goto out_unref_process;
	}

	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
	addr >>= PAGE_SHIFT;

	mutex_lock(&p->svms.lock);

	prange = svm_range_from_addr(&p->svms, addr, &parent);
	if (!prange) {
		pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
		r = -EFAULT;
		goto out_unlock_svms;
	}

	mutex_lock(&parent->migrate_mutex);
	if (prange != parent)
		mutex_lock_nested(&prange->migrate_mutex, 1);

	if (!prange->actual_loc)
		goto out_unlock_prange;

	svm_range_lock(parent);
	if (prange != parent)
		mutex_lock_nested(&prange->lock, 1);
	r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
	if (prange != parent)
		mutex_unlock(&prange->lock);
	svm_range_unlock(parent);
	if (r) {
		pr_debug("failed %d to split range by granularity\n", r);
		goto out_unlock_prange;
	}

	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
				    vmf->page);
	if (r)
		pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
			 r, prange->svms, prange, prange->start, prange->last);

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && parent == prange)
		op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
	else
		op = SVM_OP_UPDATE_RANGE_NOTIFIER;
	svm_range_add_list_work(&p->svms, parent, mm, op);
	schedule_deferred_list_work(&p->svms);

out_unlock_prange:
	if (prange != parent)
		mutex_unlock(&prange->migrate_mutex);
	mutex_unlock(&parent->migrate_mutex);
out_unlock_svms:
	mutex_unlock(&p->svms.lock);
out_unref_process:
	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
	kfd_unref_process(p);
out_mmput:
	mmput(mm);
	return r ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
	.page_free = svm_migrate_page_free,
	.migrate_to_ram = svm_migrate_to_ram,
};

/* Each VRAM page uses sizeof(struct page) on system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))

int svm_migrate_init(struct amdgpu_device *adev)
{
	struct kfd_dev *kfddev = adev->kfd.dev;
	struct dev_pagemap *pgmap;
	struct resource *res = NULL;
	unsigned long size;
	void *r;

	/* Page migration works on Vega10 or newer */
	if (!KFD_IS_SOC15(kfddev))
		return -EINVAL;

	pgmap = &kfddev->pgmap;
	memset(pgmap, 0, sizeof(*pgmap));

	/* TODO: register all vram to HMM for now.
	 * should remove reserved size
	 */
	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
	if (adev->gmc.xgmi.connected_to_cpu) {
		pgmap->range.start = adev->gmc.aper_base;
		pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
		pgmap->type = MEMORY_DEVICE_COHERENT;
	} else {
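		/* Device-private memory is not directly CPU-addressable;
		 * claim a free physical address range so the VRAM pages can
		 * be given PFNs and struct pages.
		 */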
		res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
		if (IS_ERR(res))
			return -ENOMEM;
		pgmap->range.start = res->start;
		pgmap->range.end = res->end;
		pgmap->type = MEMORY_DEVICE_PRIVATE;
	}

	pgmap->nr_range = 1;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
	pgmap->flags = 0;
	/* Device manager releases device-specific resources, memory region and
	 * pgmap when driver disconnects from device.
	 */
	r = devm_memremap_pages(adev->dev, pgmap);
	if (IS_ERR(r)) {
		pr_err("failed to register HMM device memory\n");
		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
			devm_release_mem_region(adev->dev, res->start,
						res->end - res->start + 1);
		/* Disable SVM support capability */
		pgmap->type = 0;
		return PTR_ERR(r);
	}

	pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
		 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);

	amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));

	svm_range_set_max_pages(adev);

	pr_info("HMM registered %ldMB device memory\n", size >> 20);

	return 0;
}