/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_dmem.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"

#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

#include <linux/sched/mm.h>
#include <linux/hmm.h>

/*
 * FIXME: this is ugly. Right now we are using TTM to allocate VRAM and we pin
 * it in VRAM while it is in use. We likely want to overhaul memory management
 * for nouveau to be more page-like (not necessarily with the system page size,
 * but a bigger page size) at the lowest level, and have some shim layer on top
 * that would provide the same functionality as TTM.
 */
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)

enum nouveau_aper {
        NOUVEAU_APER_VIRT,
        NOUVEAU_APER_VRAM,
        NOUVEAU_APER_HOST,
};

typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
                                      enum nouveau_aper, u64 dst_addr,
                                      enum nouveau_aper, u64 src_addr);

struct nouveau_dmem_chunk {
        struct list_head list;
        struct nouveau_bo *bo;
        struct nouveau_drm *drm;
        unsigned long pfn_first;
        unsigned long callocated;
        unsigned long bitmap[BITS_TO_LONGS(DMEM_CHUNK_NPAGES)];
        spinlock_t lock;
};

struct nouveau_dmem_migrate {
        nouveau_migrate_copy_t copy_func;
        struct nouveau_channel *chan;
};

struct nouveau_dmem {
        struct nouveau_drm *drm;
        struct dev_pagemap pagemap;
        struct nouveau_dmem_migrate migrate;
        struct list_head chunk_free;
        struct list_head chunk_full;
        struct list_head chunk_empty;
        struct mutex mutex;
};

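/*
 * Every device-private page carries a pointer to the dev_pagemap it belongs
 * to, so the owning nouveau_dmem can be recovered with container_of().
 */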
static inline struct nouveau_dmem *page_to_dmem(struct page *page)
{
        return container_of(page->pgmap, struct nouveau_dmem, pagemap);
}

static unsigned long nouveau_dmem_page_addr(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = page->zone_device_data;
        unsigned long idx = page_to_pfn(page) - chunk->pfn_first;

        return (idx << PAGE_SHIFT) + chunk->bo->bo.offset;
}

static void nouveau_dmem_page_free(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = page->zone_device_data;
        unsigned long idx = page_to_pfn(page) - chunk->pfn_first;

        /*
         * FIXME:
         *
         * This is really a bad example, we need to overhaul nouveau memory
         * management to be more page focused and to allow a lighter locking
         * scheme to be used in the process.
         */
        spin_lock(&chunk->lock);
        clear_bit(idx, chunk->bitmap);
        WARN_ON(!chunk->callocated);
        chunk->callocated--;
        /*
         * FIXME: when chunk->callocated reaches 0 we should add the chunk to
         * a reclaim list so that it can be freed in case of memory pressure.
         */
        spin_unlock(&chunk->lock);
}

static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
        if (fence) {
                nouveau_fence_wait(*fence, true, false);
                nouveau_fence_unref(fence);
        } else {
                /*
                 * FIXME: wait for the channel to be IDLE before finalizing
                 * the hmem object.
                 */
        }
}

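/*
 * Copy a single faulting device page back to system memory: allocate a
 * destination page in the faulting VMA, DMA-map it, and use the copy engine
 * to move the data from VRAM into the host page.
 */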
static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
                struct vm_fault *vmf, struct migrate_vma *args,
                dma_addr_t *dma_addr)
{
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;

        spage = migrate_pfn_to_page(args->src[0]);
        if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
                return 0;

        dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
        if (!dpage)
                return VM_FAULT_SIGBUS;
        lock_page(dpage);

        *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *dma_addr))
                goto error_free_page;

        if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
                        NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
                goto error_dma_unmap;

        args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
        return 0;

error_dma_unmap:
        dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
error_free_page:
        __free_page(dpage);
        return VM_FAULT_SIGBUS;
}

static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
        struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
        struct nouveau_drm *drm = dmem->drm;
        struct nouveau_fence *fence;
        unsigned long src = 0, dst = 0;
        dma_addr_t dma_addr = 0;
        vm_fault_t ret;
        struct migrate_vma args = {
                .vma = vmf->vma,
                .start = vmf->address,
                .end = vmf->address + PAGE_SIZE,
                .src = &src,
                .dst = &dst,
        };

        /*
         * FIXME: what we really want is to find some heuristic to migrate
         * more than just one page on CPU fault. When such a fault happens it
         * is very likely that more of the surrounding pages will CPU fault
         * too.
         */
        if (migrate_vma_setup(&args) < 0)
                return VM_FAULT_SIGBUS;
        if (!args.cpages)
                return 0;

        ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
        if (ret || dst == 0)
                goto done;

        nouveau_fence_new(dmem->migrate.chan, false, &fence);
        migrate_vma_pages(&args);
        nouveau_dmem_fence_done(&fence);
        dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
done:
        migrate_vma_finalize(&args);
        return ret;
}

static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
        .page_free = nouveau_dmem_page_free,
        .migrate_to_ram = nouveau_dmem_migrate_to_ram,
};

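/*
 * Back an empty chunk with a 2MB buffer object pinned in VRAM so that its
 * pages can be handed out by the allocator below.
 */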
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        int ret;

        if (drm->dmem == NULL)
                return -EINVAL;

        mutex_lock(&drm->dmem->mutex);
        chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
                                         struct nouveau_dmem_chunk,
                                         list);
        if (chunk == NULL) {
                mutex_unlock(&drm->dmem->mutex);
                return -ENOMEM;
        }

        list_del(&chunk->list);
        mutex_unlock(&drm->dmem->mutex);

        ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
                             TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
                             &chunk->bo);
        if (ret)
                goto out;

        ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
        if (ret) {
                nouveau_bo_ref(NULL, &chunk->bo);
                goto out;
        }

        bitmap_zero(chunk->bitmap, DMEM_CHUNK_NPAGES);
        spin_lock_init(&chunk->lock);

out:
        mutex_lock(&drm->dmem->mutex);
        if (chunk->bo)
                list_add(&chunk->list, &drm->dmem->chunk_empty);
        else
                list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
        mutex_unlock(&drm->dmem->mutex);

        return ret;
}

static struct nouveau_dmem_chunk *
nouveau_dmem_chunk_first_free_locked(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;

        chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
                                         struct nouveau_dmem_chunk,
                                         list);
        if (chunk)
                return chunk;

        chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
                                         struct nouveau_dmem_chunk,
                                         list);
        if (chunk->bo)
                return chunk;

        return NULL;
}

static int
nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
                         unsigned long npages,
                         unsigned long *pages)
{
        struct nouveau_dmem_chunk *chunk;
        unsigned long c;
        int ret;

        memset(pages, 0xff, npages * sizeof(*pages));

        mutex_lock(&drm->dmem->mutex);
        for (c = 0; c < npages;) {
                unsigned long i;

                chunk = nouveau_dmem_chunk_first_free_locked(drm);
                if (chunk == NULL) {
                        mutex_unlock(&drm->dmem->mutex);
                        ret = nouveau_dmem_chunk_alloc(drm);
                        if (ret) {
                                if (c)
                                        return 0;
                                return ret;
                        }
                        mutex_lock(&drm->dmem->mutex);
                        continue;
                }

                spin_lock(&chunk->lock);
                i = find_first_zero_bit(chunk->bitmap, DMEM_CHUNK_NPAGES);
                while (i < DMEM_CHUNK_NPAGES && c < npages) {
                        pages[c] = chunk->pfn_first + i;
                        set_bit(i, chunk->bitmap);
                        chunk->callocated++;
                        c++;

                        i = find_next_zero_bit(chunk->bitmap,
                                               DMEM_CHUNK_NPAGES, i);
                }
                spin_unlock(&chunk->lock);
        }
        mutex_unlock(&drm->dmem->mutex);

        return 0;
}

static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
        unsigned long pfns[1];
        struct page *page;
        int ret;

        /* FIXME: stop all the mismatched APIs ... */
        ret = nouveau_dmem_pages_alloc(drm, 1, pfns);
        if (ret)
                return NULL;

        page = pfn_to_page(pfns[0]);
        get_page(page);
        lock_page(page);
        return page;
}

static void
nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
{
        unlock_page(page);
        put_page(page);
}

void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        int ret;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunk_free, list) {
                ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
                /* FIXME handle pin failure */
                WARN_ON(ret);
        }
        list_for_each_entry(chunk, &drm->dmem->chunk_full, list) {
                ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
                /* FIXME handle pin failure */
                WARN_ON(ret);
        }
        mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunk_free, list) {
                nouveau_bo_unpin(chunk->bo);
        }
        list_for_each_entry(chunk, &drm->dmem->chunk_full, list) {
                nouveau_bo_unpin(chunk->bo);
        }
        mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk, *tmp;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);

        WARN_ON(!list_empty(&drm->dmem->chunk_free));
        WARN_ON(!list_empty(&drm->dmem->chunk_full));

        list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunk_empty, list) {
                if (chunk->bo) {
                        nouveau_bo_unpin(chunk->bo);
                        nouveau_bo_ref(NULL, &chunk->bo);
                }
                list_del(&chunk->list);
                kfree(chunk);
        }

        mutex_unlock(&drm->dmem->mutex);
}

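/*
 * Emit a copy-engine transfer of npages PAGE_SIZE lines between the given
 * source and destination apertures on the migration channel.
 */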
static int
nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
                    enum nouveau_aper dst_aper, u64 dst_addr,
                    enum nouveau_aper src_aper, u64 src_addr)
{
        struct nouveau_channel *chan = drm->dmem->migrate.chan;
        u32 launch_dma = (1 << 9) /* MULTI_LINE_ENABLE. */ |
                         (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
                         (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
                         (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
                         (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
        int ret;

        ret = RING_SPACE(chan, 13);
        if (ret)
                return ret;

        if (src_aper != NOUVEAU_APER_VIRT) {
                switch (src_aper) {
                case NOUVEAU_APER_VRAM:
                        BEGIN_IMC0(chan, NvSubCopy, 0x0260, 0);
                        break;
                case NOUVEAU_APER_HOST:
                        BEGIN_IMC0(chan, NvSubCopy, 0x0260, 1);
                        break;
                default:
                        return -EINVAL;
                }
                launch_dma |= 0x00001000; /* SRC_TYPE_PHYSICAL. */
        }

        if (dst_aper != NOUVEAU_APER_VIRT) {
                switch (dst_aper) {
                case NOUVEAU_APER_VRAM:
                        BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
                        break;
                case NOUVEAU_APER_HOST:
                        BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
                        break;
                default:
                        return -EINVAL;
                }
                launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
        }

        BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
        OUT_RING (chan, upper_32_bits(src_addr));
        OUT_RING (chan, lower_32_bits(src_addr));
        OUT_RING (chan, upper_32_bits(dst_addr));
        OUT_RING (chan, lower_32_bits(dst_addr));
        OUT_RING (chan, PAGE_SIZE);
        OUT_RING (chan, PAGE_SIZE);
        OUT_RING (chan, PAGE_SIZE);
        OUT_RING (chan, npages);
        BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
        OUT_RING (chan, launch_dma);
        return 0;
}

static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
        switch (drm->ttm.copy.oclass) {
        case PASCAL_DMA_COPY_A:
        case PASCAL_DMA_COPY_B:
        case VOLTA_DMA_COPY_A:
        case TURING_DMA_COPY_A:
                drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
                drm->dmem->migrate.chan = drm->ttm.chan;
                return 0;
        default:
                break;
        }
        return -ENODEV;
}

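/*
 * Register all of VRAM as MEMORY_DEVICE_PRIVATE pages and carve it up into
 * 2MB chunks that the migration code can hand out on demand.
 */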
void
nouveau_dmem_init(struct nouveau_drm *drm)
{
        struct device *device = drm->dev->dev;
        struct resource *res;
        unsigned long i, size, pfn_first;
        int ret;

        /* This only makes sense on PASCAL or newer. */
        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
                return;

        if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
                return;

        drm->dmem->drm = drm;
        mutex_init(&drm->dmem->mutex);
        INIT_LIST_HEAD(&drm->dmem->chunk_free);
        INIT_LIST_HEAD(&drm->dmem->chunk_full);
        INIT_LIST_HEAD(&drm->dmem->chunk_empty);

        size = ALIGN(drm->client.device.info.ram_user, DMEM_CHUNK_SIZE);

        /* Initialize migration dma helpers before registering memory */
        ret = nouveau_dmem_migrate_init(drm);
        if (ret)
                goto out_free;

        /*
         * FIXME: we need some kind of policy to decide how much VRAM we
         * want to register with HMM. For now just register everything, and
         * later, if we want to do things like overcommit, we can revisit
         * this.
         */
        res = devm_request_free_mem_region(device, &iomem_resource, size);
        if (IS_ERR(res))
                goto out_free;
        drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
        drm->dmem->pagemap.res = *res;
        drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
        if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
                goto out_free;

        pfn_first = res->start >> PAGE_SHIFT;
        for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
                struct nouveau_dmem_chunk *chunk;
                struct page *page;
                unsigned long j;

                chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
                if (chunk == NULL) {
                        nouveau_dmem_fini(drm);
                        return;
                }

                chunk->drm = drm;
                chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
                list_add_tail(&chunk->list, &drm->dmem->chunk_empty);

                page = pfn_to_page(chunk->pfn_first);
                for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page)
                        page->zone_device_data = chunk;
        }

        NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
        return;
out_free:
        kfree(drm->dmem);
        drm->dmem = NULL;
}

static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
                unsigned long src, dma_addr_t *dma_addr)
{
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;

        spage = migrate_pfn_to_page(src);
        if (!spage || !(src & MIGRATE_PFN_MIGRATE))
                goto out;

        dpage = nouveau_dmem_page_alloc_locked(drm);
        if (!dpage)
                return 0;

        *dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *dma_addr))
                goto out_free_page;

        if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
                        nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
                        *dma_addr))
                goto out_dma_unmap;

        return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;

out_dma_unmap:
        dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
        nouveau_dmem_page_free_locked(drm, dpage);
out:
        return 0;
}

static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
                struct migrate_vma *args, dma_addr_t *dma_addrs)
{
        struct nouveau_fence *fence;
        unsigned long addr = args->start, nr_dma = 0, i;

        for (i = 0; addr < args->end; i++) {
                args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
                                dma_addrs + nr_dma);
                if (args->dst[i])
                        nr_dma++;
                addr += PAGE_SIZE;
        }

        nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
        migrate_vma_pages(args);
        nouveau_dmem_fence_done(&fence);

        while (nr_dma--) {
                dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
        }
        /*
         * FIXME optimization: update GPU page table to point to newly
         * migrated memory.
         */
        migrate_vma_finalize(args);
}

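/*
 * Migrate a range of a VMA to VRAM in batches of at most SG_MAX_SINGLE_ALLOC
 * pages, copying each batch through the GPU copy engine.
 */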
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
                         struct vm_area_struct *vma,
                         unsigned long start,
                         unsigned long end)
{
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
        dma_addr_t *dma_addrs;
        struct migrate_vma args = {
                .vma = vma,
                .start = start,
        };
        unsigned long c, i;
        int ret = -ENOMEM;

        args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
        if (!args.src)
                goto out;
        args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
        if (!args.dst)
                goto out_free_src;

        dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
        if (!dma_addrs)
                goto out_free_dst;

        for (i = 0; i < npages; i += c) {
                c = min(SG_MAX_SINGLE_ALLOC, npages);
                args.end = start + (c << PAGE_SHIFT);
                ret = migrate_vma_setup(&args);
                if (ret)
                        goto out_free_dma;

                if (args.cpages)
                        nouveau_dmem_migrate_chunk(drm, &args, dma_addrs);
                args.start = args.end;
        }

        ret = 0;
out_free_dma:
        kfree(dma_addrs);
out_free_dst:
        kfree(args.dst);
out_free_src:
        kfree(args.src);
out:
        return ret;
}

static inline bool
nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
{
        return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
}

void
nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
                         struct hmm_range *range)
{
        unsigned long i, npages;

        npages = (range->end - range->start) >> PAGE_SHIFT;
        for (i = 0; i < npages; ++i) {
                struct page *page;
                uint64_t addr;

                page = hmm_device_entry_to_page(range, range->pfns[i]);
                if (page == NULL)
                        continue;

                if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE]))
                        continue;

                if (!nouveau_dmem_page(drm, page)) {
                        WARN(1, "Some unknown device memory !\n");
                        range->pfns[i] = 0;
                        continue;
                }

                addr = nouveau_dmem_page_addr(page);
                range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
                range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
        }
}

/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_dmem.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"
#include "nouveau_svm.h"

#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/push906f.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

#include <nvhw/class/cla0b5.h>

#include <linux/sched/mm.h>
#include <linux/hmm.h>

/*
 * FIXME: this is ugly. Right now we are using TTM to allocate VRAM and we pin
 * it in VRAM while it is in use. We likely want to overhaul memory management
 * for nouveau to be more page-like (not necessarily with the system page size,
 * but a bigger page size) at the lowest level, and have some shim layer on top
 * that would provide the same functionality as TTM.
 */
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)

enum nouveau_aper {
        NOUVEAU_APER_VIRT,
        NOUVEAU_APER_VRAM,
        NOUVEAU_APER_HOST,
};

typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
                                      enum nouveau_aper, u64 dst_addr,
                                      enum nouveau_aper, u64 src_addr);
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
                                    enum nouveau_aper, u64 dst_addr);

struct nouveau_dmem_chunk {
        struct list_head list;
        struct nouveau_bo *bo;
        struct nouveau_drm *drm;
        unsigned long callocated;
        struct dev_pagemap pagemap;
};

struct nouveau_dmem_migrate {
        nouveau_migrate_copy_t copy_func;
        nouveau_clear_page_t clear_func;
        struct nouveau_channel *chan;
};

struct nouveau_dmem {
        struct nouveau_drm *drm;
        struct nouveau_dmem_migrate migrate;
        struct list_head chunks;
        struct mutex mutex;
        struct page *free_pages;
        spinlock_t lock;
};

static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
        return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
}

static struct nouveau_drm *page_to_drm(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);

        return chunk->drm;
}

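/*
 * Translate a device-private page into the VRAM address backing it: the
 * offset of the page within its chunk's pagemap range, added to the chunk's
 * buffer object offset.
 */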
unsigned long nouveau_dmem_page_addr(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
                                chunk->pagemap.range.start;

        return chunk->bo->offset + off;
}

static void nouveau_dmem_page_free(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        struct nouveau_dmem *dmem = chunk->drm->dmem;

        spin_lock(&dmem->lock);
        page->zone_device_data = dmem->free_pages;
        dmem->free_pages = page;

        WARN_ON(!chunk->callocated);
        chunk->callocated--;
        /*
         * FIXME: when chunk->callocated reaches 0 we should add the chunk to
         * a reclaim list so that it can be freed in case of memory pressure.
         */
        spin_unlock(&dmem->lock);
}

static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
        if (fence) {
                nouveau_fence_wait(*fence, true, false);
                nouveau_fence_unref(fence);
        } else {
                /*
                 * FIXME: wait for the channel to be IDLE before finalizing
                 * the hmem object.
                 */
        }
}

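/*
 * Copy a single faulting device page back to system memory. The GPU's view
 * of the range is invalidated under the SVMM lock before the copy engine
 * moves the data from VRAM into the freshly allocated host page.
 */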
static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
                struct vm_fault *vmf, struct migrate_vma *args,
                dma_addr_t *dma_addr)
{
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;
        struct nouveau_svmm *svmm;

        spage = migrate_pfn_to_page(args->src[0]);
        if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
                return 0;

        dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
        if (!dpage)
                return VM_FAULT_SIGBUS;
        lock_page(dpage);

        *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *dma_addr))
                goto error_free_page;

        svmm = spage->zone_device_data;
        mutex_lock(&svmm->mutex);
        nouveau_svmm_invalidate(svmm, args->start, args->end);
        if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
                        NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
                goto error_dma_unmap;
        mutex_unlock(&svmm->mutex);

        args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
        return 0;

error_dma_unmap:
        mutex_unlock(&svmm->mutex);
        dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
error_free_page:
        __free_page(dpage);
        return VM_FAULT_SIGBUS;
}

static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
        struct nouveau_drm *drm = page_to_drm(vmf->page);
        struct nouveau_dmem *dmem = drm->dmem;
        struct nouveau_fence *fence;
        unsigned long src = 0, dst = 0;
        dma_addr_t dma_addr = 0;
        vm_fault_t ret;
        struct migrate_vma args = {
                .vma = vmf->vma,
                .start = vmf->address,
                .end = vmf->address + PAGE_SIZE,
                .src = &src,
                .dst = &dst,
                .pgmap_owner = drm->dev,
                .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
        };

        /*
         * FIXME: what we really want is to find some heuristic to migrate
         * more than just one page on CPU fault. When such a fault happens it
         * is very likely that more of the surrounding pages will CPU fault
         * too.
         */
        if (migrate_vma_setup(&args) < 0)
                return VM_FAULT_SIGBUS;
        if (!args.cpages)
                return 0;

        ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
        if (ret || dst == 0)
                goto done;

        nouveau_fence_new(dmem->migrate.chan, false, &fence);
        migrate_vma_pages(&args);
        nouveau_dmem_fence_done(&fence);
        dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
done:
        migrate_vma_finalize(&args);
        return ret;
}

static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
        .page_free = nouveau_dmem_page_free,
        .migrate_to_ram = nouveau_dmem_migrate_to_ram,
};

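/*
 * Grow the device-private page pool by one chunk: reserve an unused physical
 * address range, back it with a pinned VRAM buffer object, remap it as
 * MEMORY_DEVICE_PRIVATE pages, and push the new pages onto the free list.
 */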
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
        struct nouveau_dmem_chunk *chunk;
        struct resource *res;
        struct page *page;
        void *ptr;
        unsigned long i, pfn_first;
        int ret;

        chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
        if (chunk == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        /* Allocate unused physical address space for device private pages. */
        res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
                                      "nouveau_dmem");
        if (IS_ERR(res)) {
                ret = PTR_ERR(res);
                goto out_free;
        }

        chunk->drm = drm;
        chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
        chunk->pagemap.range.start = res->start;
        chunk->pagemap.range.end = res->end;
        chunk->pagemap.nr_range = 1;
        chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
        chunk->pagemap.owner = drm->dev;

        ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
                             NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
                             &chunk->bo);
        if (ret)
                goto out_release;

        ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
        if (ret)
                goto out_bo_free;

        ptr = memremap_pages(&chunk->pagemap, numa_node_id());
        if (IS_ERR(ptr)) {
                ret = PTR_ERR(ptr);
                goto out_bo_unpin;
        }

        mutex_lock(&drm->dmem->mutex);
        list_add(&chunk->list, &drm->dmem->chunks);
        mutex_unlock(&drm->dmem->mutex);

        pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
        page = pfn_to_page(pfn_first);
        spin_lock(&drm->dmem->lock);
        for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
                page->zone_device_data = drm->dmem->free_pages;
                drm->dmem->free_pages = page;
        }
        *ppage = page;
        chunk->callocated++;
        spin_unlock(&drm->dmem->lock);

        NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
                DMEM_CHUNK_SIZE >> 20);

        return 0;

out_bo_unpin:
        nouveau_bo_unpin(chunk->bo);
out_bo_free:
        nouveau_bo_ref(NULL, &chunk->bo);
out_release:
        release_mem_region(chunk->pagemap.range.start,
                           range_len(&chunk->pagemap.range));
out_free:
        kfree(chunk);
out:
        return ret;
}

static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        struct page *page = NULL;
        int ret;

        spin_lock(&drm->dmem->lock);
        if (drm->dmem->free_pages) {
                page = drm->dmem->free_pages;
                drm->dmem->free_pages = page->zone_device_data;
                chunk = nouveau_page_to_chunk(page);
                chunk->callocated++;
                spin_unlock(&drm->dmem->lock);
        } else {
                spin_unlock(&drm->dmem->lock);
                ret = nouveau_dmem_chunk_alloc(drm, &page);
                if (ret)
                        return NULL;
        }

        get_page(page);
        lock_page(page);
        return page;
}

static void
nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
{
        unlock_page(page);
        put_page(page);
}

void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        int ret;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunks, list) {
                ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
                /* FIXME handle pin failure */
                WARN_ON(ret);
        }
        mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunks, list)
                nouveau_bo_unpin(chunk->bo);
        mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk, *tmp;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);

        list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
                nouveau_bo_unpin(chunk->bo);
                nouveau_bo_ref(NULL, &chunk->bo);
                list_del(&chunk->list);
                memunmap_pages(&chunk->pagemap);
                release_mem_region(chunk->pagemap.range.start,
                                   range_len(&chunk->pagemap.range));
                kfree(chunk);
        }

        mutex_unlock(&drm->dmem->mutex);
}

static int
nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
                    enum nouveau_aper dst_aper, u64 dst_addr,
                    enum nouveau_aper src_aper, u64 src_addr)
{
        struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
        u32 launch_dma = 0;
        int ret;

        ret = PUSH_WAIT(push, 13);
        if (ret)
                return ret;

        if (src_aper != NOUVEAU_APER_VIRT) {
                switch (src_aper) {
                case NOUVEAU_APER_VRAM:
                        PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
                                  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, LOCAL_FB));
                        break;
                case NOUVEAU_APER_HOST:
                        PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
                                  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, COHERENT_SYSMEM));
                        break;
                default:
                        return -EINVAL;
                }

                launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, SRC_TYPE, PHYSICAL);
        }

        if (dst_aper != NOUVEAU_APER_VIRT) {
                switch (dst_aper) {
                case NOUVEAU_APER_VRAM:
                        PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
                                  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
                        break;
                case NOUVEAU_APER_HOST:
                        PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
                                  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
                        break;
                default:
                        return -EINVAL;
                }

                launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
        }

        PUSH_MTHD(push, NVA0B5, OFFSET_IN_UPPER,
                  NVVAL(NVA0B5, OFFSET_IN_UPPER, UPPER, upper_32_bits(src_addr)),

                                OFFSET_IN_LOWER, lower_32_bits(src_addr),

                                OFFSET_OUT_UPPER,
                  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),

                                OFFSET_OUT_LOWER, lower_32_bits(dst_addr),
                                PITCH_IN, PAGE_SIZE,
                                PITCH_OUT, PAGE_SIZE,
                                LINE_LENGTH_IN, PAGE_SIZE,
                                LINE_COUNT, npages);

        PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
                  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
                  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
                  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
                  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, TRUE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, FALSE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
        return 0;
}

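/*
 * Clear `length` bytes of the destination aperture by using the copy
 * engine's remap feature to write a constant zero pattern.
 */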
static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
                     enum nouveau_aper dst_aper, u64 dst_addr)
{
        struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
        u32 launch_dma = 0;
        int ret;

        ret = PUSH_WAIT(push, 12);
        if (ret)
                return ret;

        switch (dst_aper) {
        case NOUVEAU_APER_VRAM:
                PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
                          NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
                break;
        case NOUVEAU_APER_HOST:
                PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
                          NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
                break;
        default:
                return -EINVAL;
        }

        launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);

        PUSH_MTHD(push, NVA0B5, SET_REMAP_CONST_A, 0,
                                SET_REMAP_CONST_B, 0,

                                SET_REMAP_COMPONENTS,
                  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_X, CONST_A) |
                  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_Y, CONST_B) |
                  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, COMPONENT_SIZE, FOUR) |
                  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, NUM_DST_COMPONENTS, TWO));

        PUSH_MTHD(push, NVA0B5, OFFSET_OUT_UPPER,
                  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),

                                OFFSET_OUT_LOWER, lower_32_bits(dst_addr));

        PUSH_MTHD(push, NVA0B5, LINE_LENGTH_IN, length >> 3);

        PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
                  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
                  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
                  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
                  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, FALSE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, TRUE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
        return 0;
}

static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
        switch (drm->ttm.copy.oclass) {
        case PASCAL_DMA_COPY_A:
        case PASCAL_DMA_COPY_B:
        case VOLTA_DMA_COPY_A:
        case TURING_DMA_COPY_A:
                drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
                drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
                drm->dmem->migrate.chan = drm->ttm.chan;
                return 0;
        default:
                break;
        }
        return -ENODEV;
}

void
nouveau_dmem_init(struct nouveau_drm *drm)
{
        int ret;

        /* This only makes sense on PASCAL or newer. */
        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
                return;

        if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
                return;

        drm->dmem->drm = drm;
        mutex_init(&drm->dmem->mutex);
        INIT_LIST_HEAD(&drm->dmem->chunks);
        spin_lock_init(&drm->dmem->lock);

        /* Initialize migration dma helpers before registering memory */
        ret = nouveau_dmem_migrate_init(drm);
        if (ret) {
                kfree(drm->dmem);
                drm->dmem = NULL;
        }
}

static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
                struct nouveau_svmm *svmm, unsigned long src,
                dma_addr_t *dma_addr, u64 *pfn)
{
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;
        unsigned long paddr;

        spage = migrate_pfn_to_page(src);
        if (!(src & MIGRATE_PFN_MIGRATE))
                goto out;

        dpage = nouveau_dmem_page_alloc_locked(drm);
        if (!dpage)
                goto out;

        paddr = nouveau_dmem_page_addr(dpage);
        if (spage) {
                *dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
                                         DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, *dma_addr))
                        goto out_free_page;
                if (drm->dmem->migrate.copy_func(drm, 1,
                        NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
                        goto out_dma_unmap;
        } else {
                *dma_addr = DMA_MAPPING_ERROR;
                if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
                        NOUVEAU_APER_VRAM, paddr))
                        goto out_free_page;
        }

        dpage->zone_device_data = svmm;
        *pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
                ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
        if (src & MIGRATE_PFN_WRITE)
                *pfn |= NVIF_VMM_PFNMAP_V0_W;
        return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;

out_dma_unmap:
        dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
        nouveau_dmem_page_free_locked(drm, dpage);
out:
        *pfn = NVIF_VMM_PFNMAP_V0_NONE;
        return 0;
}

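/*
 * Copy one batch of pages to VRAM, then point the GPU page tables at the
 * new locations via nouveau_pfns_map() before unmapping the DMA addresses.
 */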
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
                struct nouveau_svmm *svmm, struct migrate_vma *args,
                dma_addr_t *dma_addrs, u64 *pfns)
{
        struct nouveau_fence *fence;
        unsigned long addr = args->start, nr_dma = 0, i;

        for (i = 0; addr < args->end; i++) {
                args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
                                args->src[i], dma_addrs + nr_dma, pfns + i);
                if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
                        nr_dma++;
                addr += PAGE_SIZE;
        }

        nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
        migrate_vma_pages(args);
        nouveau_dmem_fence_done(&fence);
        nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);

        while (nr_dma--) {
                dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
        }
        migrate_vma_finalize(args);
}

int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
                         struct nouveau_svmm *svmm,
                         struct vm_area_struct *vma,
                         unsigned long start,
                         unsigned long end)
{
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
        dma_addr_t *dma_addrs;
        struct migrate_vma args = {
                .vma = vma,
                .start = start,
                .pgmap_owner = drm->dev,
                .flags = MIGRATE_VMA_SELECT_SYSTEM,
        };
        unsigned long i;
        u64 *pfns;
        int ret = -ENOMEM;

        if (drm->dmem == NULL)
                return -ENODEV;

        args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
        if (!args.src)
                goto out;
        args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
        if (!args.dst)
                goto out_free_src;

        dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
        if (!dma_addrs)
                goto out_free_dst;

        pfns = nouveau_pfns_alloc(max);
        if (!pfns)
                goto out_free_dma;

        for (i = 0; i < npages; i += max) {
                args.end = start + (max << PAGE_SHIFT);
                ret = migrate_vma_setup(&args);
                if (ret)
                        goto out_free_pfns;

                if (args.cpages)
                        nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
                                                   pfns);
                args.start = args.end;
        }

        ret = 0;
out_free_pfns:
        nouveau_pfns_free(pfns);
out_free_dma:
        kfree(dma_addrs);
out_free_dst:
        kfree(args.dst);
out_free_src:
        kfree(args.src);
out:
        return ret;
}