/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_svm.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dmem.h"

#include <nvif/notify.h>
#include <nvif/object.h>
#include <nvif/vmm.h>

#include <nvif/class.h>
#include <nvif/clb069.h>
#include <nvif/ifc00d.h>

#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/hmm.h>
#include <linux/rmap.h>

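/* Per-device SVM state: the list of channel instances bound to an SVMM
 * (protected by @mutex), and the replayable fault buffer, a ring of
 * hardware-written entries tracked by the cached GET/PUT indices and
 * parsed into the @fault array for sorting and handling.
 */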
struct nouveau_svm {
	struct nouveau_drm *drm;
	struct mutex mutex;
	struct list_head inst;

	struct nouveau_svm_fault_buffer {
		int id;
		struct nvif_object object;
		u32 entries;
		u32 getaddr;
		u32 putaddr;
		u32 get;
		u32 put;
		struct nvif_notify notify;

		struct nouveau_svm_fault {
			u64 inst;
			u64 addr;
			u64 time;
			u32 engine;
			u8 gpc;
			u8 hub;
			u8 access;
			u8 client;
			u8 fault;
			struct nouveau_svmm *svmm;
		} **fault;
		int fault_nr;
	} buffer[1];
};

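/* Access types decoded from a fault entry's info field (see
 * nouveau_svm_fault_cache()); the values match the raw access codes
 * switched on in nouveau_svm_fault().
 */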
#define FAULT_ACCESS_READ 0
#define FAULT_ACCESS_WRITE 1
#define FAULT_ACCESS_ATOMIC 2
#define FAULT_ACCESS_PREFETCH 3

#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)

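/* Argument layout for the NVIF_VMM_V0_PFNMAP method: an ioctl header, a
 * method header, and the pfnmap payload whose phys[] array carries one
 * encoded PFN per page (see nouveau_hmm_convert_pfn() and
 * nouveau_pfns_map()).
 */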
struct nouveau_pfnmap_args {
	struct nvif_ioctl_v0 i;
	struct nvif_ioctl_mthd_v0 m;
	struct nvif_vmm_pfnmap_v0 p;
};

struct nouveau_ivmm {
	struct nouveau_svmm *svmm;
	u64 inst;
	struct list_head head;
};

static struct nouveau_ivmm *
nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
{
	struct nouveau_ivmm *ivmm;
	list_for_each_entry(ivmm, &svm->inst, head) {
		if (ivmm->inst == inst)
			return ivmm;
	}
	return NULL;
}

#define SVMM_DBG(s,f,a...) \
	NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
#define SVMM_ERR(s,f,a...) \
	NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)

int
nouveau_svmm_bind(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_svm_bind *args = data;
	unsigned target, cmd, priority;
	unsigned long addr, end;
	struct mm_struct *mm;

	args->va_start &= PAGE_MASK;
	args->va_end = ALIGN(args->va_end, PAGE_SIZE);

	/* Sanity check arguments */
	if (args->reserved0 || args->reserved1)
		return -EINVAL;
	if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK))
		return -EINVAL;
	if (args->va_start >= args->va_end)
		return -EINVAL;

	cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
	cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
	switch (cmd) {
	case NOUVEAU_SVM_BIND_COMMAND__MIGRATE:
		break;
	default:
		return -EINVAL;
	}

	priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
	priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;

	/* FIXME: support CPU targets, i.e. all target values < GPU_VRAM. */
	target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
	target &= NOUVEAU_SVM_BIND_TARGET_MASK;
	switch (target) {
	case NOUVEAU_SVM_BIND_TARGET__GPU_VRAM:
		break;
	default:
		return -EINVAL;
	}

	/*
	 * FIXME: For now refuse a non-zero stride; the migrate kernel
	 * function needs to be taught about strides so that we avoid
	 * creating a mess within each device driver.
	 */
	if (args->stride)
		return -EINVAL;

	/*
	 * OK, we are asked to do something sane.  For now we only support
	 * migrate commands, but we will add things like memory policy
	 * (what to do on a page fault) and maybe some other commands.
	 */

	mm = get_task_mm(current);
	mmap_read_lock(mm);

	if (!cli->svm.svmm) {
		mmap_read_unlock(mm);
		return -EINVAL;
	}

	for (addr = args->va_start, end = args->va_end; addr < end;) {
		struct vm_area_struct *vma;
		unsigned long next;

		vma = find_vma_intersection(mm, addr, end);
		if (!vma)
			break;

		addr = max(addr, vma->vm_start);
		next = min(vma->vm_end, end);
		/* This is a best effort so we ignore errors */
		nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
					 next);
		addr = next;
	}

	/*
	 * FIXME: Return the number of pages we have migrated.  The migrate
	 * API needs to be updated to return that information so that we
	 * can report it to user space.
	 */
	args->result = 0;

	mmap_read_unlock(mm);
	mmput(mm);

	return 0;
}

/* Unlink channel instance from SVMM. */
void
nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
{
	struct nouveau_ivmm *ivmm;
	if (svmm) {
		mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
		ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
		if (ivmm) {
			list_del(&ivmm->head);
			kfree(ivmm);
		}
		mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
	}
}

/* Link channel instance to SVMM. */
int
nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
{
	struct nouveau_ivmm *ivmm;
	if (svmm) {
		if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
			return -ENOMEM;
		ivmm->svmm = svmm;
		ivmm->inst = inst;

		mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
		list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
		mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
	}
	return 0;
}

/* Invalidate SVMM address-range on GPU. */
void
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
	if (limit > start) {
		nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
				 &(struct nvif_vmm_pfnclr_v0) {
					.addr = start,
					.size = limit - start,
				 }, sizeof(struct nvif_vmm_pfnclr_v0));
	}
}

static int
nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
				    const struct mmu_notifier_range *update)
{
	struct nouveau_svmm *svmm =
		container_of(mn, struct nouveau_svmm, notifier);
	unsigned long start = update->start;
	unsigned long limit = update->end;

	if (!mmu_notifier_range_blockable(update))
		return -EAGAIN;

	SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);

	mutex_lock(&svmm->mutex);
	if (unlikely(!svmm->vmm))
		goto out;

	/*
	 * Ignore invalidation callbacks for device private pages since
	 * the invalidation is handled as part of the migration process.
	 */
	if (update->event == MMU_NOTIFY_MIGRATE &&
	    update->owner == svmm->vmm->cli->drm->dev)
		goto out;

	if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
		if (start < svmm->unmanaged.start) {
			nouveau_svmm_invalidate(svmm, start,
						svmm->unmanaged.limit);
		}
		start = svmm->unmanaged.limit;
	}

	nouveau_svmm_invalidate(svmm, start, limit);

out:
	mutex_unlock(&svmm->mutex);
	return 0;
}

static void nouveau_svmm_free_notifier(struct mmu_notifier *mn)
{
	kfree(container_of(mn, struct nouveau_svmm, notifier));
}

static const struct mmu_notifier_ops nouveau_mn_ops = {
	.invalidate_range_start = nouveau_svmm_invalidate_range_start,
	.free_notifier = nouveau_svmm_free_notifier,
};

void
nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
	struct nouveau_svmm *svmm = *psvmm;
	if (svmm) {
		mutex_lock(&svmm->mutex);
		svmm->vmm = NULL;
		mutex_unlock(&svmm->mutex);
		mmu_notifier_put(&svmm->notifier);
		*psvmm = NULL;
	}
}

int
nouveau_svmm_init(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_svmm *svmm;
	struct drm_nouveau_svm_init *args = data;
	int ret;

	/* We need to fail if svm is disabled */
	if (!cli->drm->svm)
		return -ENOSYS;

	/* Allocate tracking for SVM-enabled VMM. */
	if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
		return -ENOMEM;
	svmm->vmm = &cli->svm;
	svmm->unmanaged.start = args->unmanaged_addr;
	svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
	mutex_init(&svmm->mutex);

	/* Check that SVM isn't already enabled for the client. */
	mutex_lock(&cli->mutex);
	if (cli->svm.cli) {
		ret = -EBUSY;
		goto out_free;
	}

	/* Allocate a new GPU VMM that can support SVM (managed by the
	 * client, with replayable faults enabled).
	 *
	 * All future channel/memory allocations will make use of this
	 * VMM instead of the standard one.
	 */
	ret = nvif_vmm_ctor(&cli->mmu, "svmVmm",
			    cli->vmm.vmm.object.oclass, true,
			    args->unmanaged_addr, args->unmanaged_size,
			    &(struct gp100_vmm_v0) {
				.fault_replay = true,
			    }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
	if (ret)
		goto out_free;

	mmap_write_lock(current->mm);
	svmm->notifier.ops = &nouveau_mn_ops;
	ret = __mmu_notifier_register(&svmm->notifier, current->mm);
	if (ret)
		goto out_mm_unlock;
	/* Note, ownership of svmm transfers to mmu_notifier */

	cli->svm.svmm = svmm;
	cli->svm.cli = cli;
	mmap_write_unlock(current->mm);
	mutex_unlock(&cli->mutex);
	return 0;

out_mm_unlock:
	mmap_write_unlock(current->mm);
out_free:
	mutex_unlock(&cli->mutex);
	kfree(svmm);
	return ret;
}

/* Issue fault replay for GPU to retry accesses that faulted previously. */
static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
{
	SVM_DBG(svm, "replay");
	WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
				 GP100_VMM_VN_FAULT_REPLAY,
				 &(struct gp100_vmm_fault_replay_vn) {},
				 sizeof(struct gp100_vmm_fault_replay_vn)));
}

/* Cancel a replayable fault that could not be handled.
 *
 * Cancelling the fault will trigger recovery to reset the engine
 * and kill the offending channel (ie. GPU SIGSEGV).
 */
static void
nouveau_svm_fault_cancel(struct nouveau_svm *svm,
			 u64 inst, u8 hub, u8 gpc, u8 client)
{
	SVM_DBG(svm, "cancel %016llx %d %02x %02x", inst, hub, gpc, client);
	WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
				 GP100_VMM_VN_FAULT_CANCEL,
				 &(struct gp100_vmm_fault_cancel_v0) {
					.hub = hub,
					.gpc = gpc,
					.client = client,
					.inst = inst,
				 }, sizeof(struct gp100_vmm_fault_cancel_v0)));
}

static void
nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
			       struct nouveau_svm_fault *fault)
{
	nouveau_svm_fault_cancel(svm, fault->inst,
				      fault->hub,
				      fault->gpc,
				      fault->client);
}

static int
nouveau_svm_fault_priority(u8 fault)
{
	switch (fault) {
	case FAULT_ACCESS_PREFETCH:
		return 0;
	case FAULT_ACCESS_READ:
		return 1;
	case FAULT_ACCESS_WRITE:
		return 2;
	case FAULT_ACCESS_ATOMIC:
		return 3;
	default:
		WARN_ON_ONCE(1);
		return -1;
	}
}

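/* Sort comparator for cached faults: order by instance pointer, then by
 * address, then by access priority, so the highest-priority access for a
 * given page is handled first and later duplicates can be skipped.
 */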
static int
nouveau_svm_fault_cmp(const void *a, const void *b)
{
	const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
	const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault **)b;
	int ret;
	if ((ret = (s64)fa->inst - fb->inst))
		return ret;
	if ((ret = (s64)fa->addr - fb->addr))
		return ret;
	return nouveau_svm_fault_priority(fa->access) -
	       nouveau_svm_fault_priority(fb->access);
}

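/* Read one 32-byte fault buffer entry (instance, address, timestamp,
 * engine and info words) from the HW buffer at @offset, clear the info
 * word's top bit to mark it consumed, and append the decoded fault to
 * the software cache.
 */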
static void
nouveau_svm_fault_cache(struct nouveau_svm *svm,
			struct nouveau_svm_fault_buffer *buffer, u32 offset)
{
	struct nvif_object *memory = &buffer->object;
	const u32 instlo = nvif_rd32(memory, offset + 0x00);
	const u32 insthi = nvif_rd32(memory, offset + 0x04);
	const u32 addrlo = nvif_rd32(memory, offset + 0x08);
	const u32 addrhi = nvif_rd32(memory, offset + 0x0c);
	const u32 timelo = nvif_rd32(memory, offset + 0x10);
	const u32 timehi = nvif_rd32(memory, offset + 0x14);
	const u32 engine = nvif_rd32(memory, offset + 0x18);
	const u32 info = nvif_rd32(memory, offset + 0x1c);
	const u64 inst = (u64)insthi << 32 | instlo;
	const u8 gpc = (info & 0x1f000000) >> 24;
	const u8 hub = (info & 0x00100000) >> 20;
	const u8 client = (info & 0x00007f00) >> 8;
	struct nouveau_svm_fault *fault;

	/* XXX: i think we're supposed to spin waiting */
	if (WARN_ON(!(info & 0x80000000)))
		return;

	nvif_mask(memory, offset + 0x1c, 0x80000000, 0x00000000);

	if (!buffer->fault[buffer->fault_nr]) {
		fault = kmalloc(sizeof(*fault), GFP_KERNEL);
		if (WARN_ON(!fault)) {
			nouveau_svm_fault_cancel(svm, inst, hub, gpc, client);
			return;
		}
		buffer->fault[buffer->fault_nr] = fault;
	}

	fault = buffer->fault[buffer->fault_nr++];
	fault->inst = inst;
	fault->addr = (u64)addrhi << 32 | addrlo;
	fault->time = (u64)timehi << 32 | timelo;
	fault->engine = engine;
	fault->gpc = gpc;
	fault->hub = hub;
	fault->access = (info & 0x000f0000) >> 16;
	fault->client = client;
	fault->fault = (info & 0x0000001f);

	SVM_DBG(svm, "fault %016llx %016llx %02x",
		fault->inst, fault->addr, fault->access);
}

struct svm_notifier {
	struct mmu_interval_notifier notifier;
	struct nouveau_svmm *svmm;
};

static bool nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
					 const struct mmu_notifier_range *range,
					 unsigned long cur_seq)
{
	struct svm_notifier *sn =
		container_of(mni, struct svm_notifier, notifier);

	if (range->event == MMU_NOTIFY_EXCLUSIVE &&
	    range->owner == sn->svmm->vmm->cli->drm->dev)
		return true;

	/*
	 * serializes the update to mni->invalidate_seq done by caller and
	 * prevents invalidation of the PTE from progressing while HW is being
	 * programmed. This is very hacky and only works because the normal
	 * notifier that does invalidation is always called after the range
	 * notifier.
	 */
	if (mmu_notifier_range_blockable(range))
		mutex_lock(&sn->svmm->mutex);
	else if (!mutex_trylock(&sn->svmm->mutex))
		return false;
	mmu_interval_set_seq(mni, cur_seq);
	mutex_unlock(&sn->svmm->mutex);
	return true;
}

static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
	.invalidate = nouveau_svm_range_invalidate,
};

static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
				    struct hmm_range *range,
				    struct nouveau_pfnmap_args *args)
{
	struct page *page;

	/*
	 * The address prepared here is passed through nvif_object_ioctl()
	 * to an eventual DMA map in something like gp100_vmm_pgt_pfn()
	 *
	 * This is all just encoding the internal hmm representation into a
	 * different nouveau internal representation.
	 */
	if (!(range->hmm_pfns[0] & HMM_PFN_VALID)) {
		args->p.phys[0] = 0;
		return;
	}

	page = hmm_pfn_to_page(range->hmm_pfns[0]);
	/*
	 * Only map compound pages to the GPU if the CPU is also mapping the
	 * page as a compound page. Otherwise, the PTE protections might not be
	 * consistent (e.g., CPU only maps part of a compound page).
	 * Note that the underlying page might still be larger than the
	 * CPU mapping (e.g., a PUD sized compound page partially mapped with
	 * a PMD sized page table entry).
	 */
	if (hmm_pfn_to_map_order(range->hmm_pfns[0])) {
		unsigned long addr = args->p.addr;

		args->p.page = hmm_pfn_to_map_order(range->hmm_pfns[0]) +
				PAGE_SHIFT;
		args->p.size = 1UL << args->p.page;
		args->p.addr &= ~(args->p.size - 1);
		page -= (addr - args->p.addr) >> PAGE_SHIFT;
	}
	if (is_device_private_page(page))
		args->p.phys[0] = nouveau_dmem_page_addr(page) |
				NVIF_VMM_PFNMAP_V0_V |
				NVIF_VMM_PFNMAP_V0_VRAM;
	else
		args->p.phys[0] = page_to_phys(page) |
				NVIF_VMM_PFNMAP_V0_V |
				NVIF_VMM_PFNMAP_V0_HOST;
	if (range->hmm_pfns[0] & HMM_PFN_WRITE)
		args->p.phys[0] |= NVIF_VMM_PFNMAP_V0_W;
}

static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
				      struct nouveau_drm *drm,
				      struct nouveau_pfnmap_args *args, u32 size,
				      struct svm_notifier *notifier)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	struct mm_struct *mm = svmm->notifier.mm;
	struct page *page;
	unsigned long start = args->p.addr;
	unsigned long notifier_seq;
	int ret = 0;

	ret = mmu_interval_notifier_insert(&notifier->notifier, mm,
					   args->p.addr, args->p.size,
					   &nouveau_svm_mni_ops);
	if (ret)
		return ret;

	while (true) {
		if (time_after(jiffies, timeout)) {
			ret = -EBUSY;
			goto out;
		}

		notifier_seq = mmu_interval_read_begin(&notifier->notifier);
		mmap_read_lock(mm);
		ret = make_device_exclusive_range(mm, start, start + PAGE_SIZE,
						  &page, drm->dev);
		mmap_read_unlock(mm);
		if (ret <= 0 || !page) {
			ret = -EINVAL;
			goto out;
		}

		mutex_lock(&svmm->mutex);
		if (!mmu_interval_read_retry(&notifier->notifier,
					     notifier_seq))
			break;
		mutex_unlock(&svmm->mutex);
	}

	/* Map the page on the GPU. */
	args->p.page = 12;
	args->p.size = PAGE_SIZE;
	args->p.addr = start;
	args->p.phys[0] = page_to_phys(page) |
		NVIF_VMM_PFNMAP_V0_V |
		NVIF_VMM_PFNMAP_V0_W |
		NVIF_VMM_PFNMAP_V0_A |
		NVIF_VMM_PFNMAP_V0_HOST;

	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
	mutex_unlock(&svmm->mutex);

	unlock_page(page);
	put_page(page);

out:
	mmu_interval_notifier_remove(&notifier->notifier);
	return ret;
}

static int nouveau_range_fault(struct nouveau_svmm *svmm,
			       struct nouveau_drm *drm,
			       struct nouveau_pfnmap_args *args, u32 size,
			       unsigned long hmm_flags,
			       struct svm_notifier *notifier)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	/* Have HMM fault pages within the fault window to the GPU. */
	unsigned long hmm_pfns[1];
	struct hmm_range range = {
		.notifier = &notifier->notifier,
		.default_flags = hmm_flags,
		.hmm_pfns = hmm_pfns,
		.dev_private_owner = drm->dev,
	};
	struct mm_struct *mm = svmm->notifier.mm;
	int ret;

	ret = mmu_interval_notifier_insert(&notifier->notifier, mm,
					   args->p.addr, args->p.size,
					   &nouveau_svm_mni_ops);
	if (ret)
		return ret;

	range.start = notifier->notifier.interval_tree.start;
	range.end = notifier->notifier.interval_tree.last + 1;

	while (true) {
		if (time_after(jiffies, timeout)) {
			ret = -EBUSY;
			goto out;
		}

		range.notifier_seq = mmu_interval_read_begin(range.notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				continue;
			goto out;
		}

		mutex_lock(&svmm->mutex);
		if (mmu_interval_read_retry(range.notifier,
					    range.notifier_seq)) {
			mutex_unlock(&svmm->mutex);
			continue;
		}
		break;
	}

	nouveau_hmm_convert_pfn(drm, &range, args);

	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
	mutex_unlock(&svmm->mutex);

out:
	mmu_interval_notifier_remove(&notifier->notifier);

	return ret;
}

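/* Replayable fault handler: drain new entries from the HW buffer into the
 * cache and update GET, sort them, resolve each instance pointer to its
 * SVMM, then fault and map the backing pages on the GPU (hmm_range_fault()
 * for normal access, make_device_exclusive_range() for atomic access).
 * Faults that cannot be handled are cancelled; otherwise a replay is
 * issued at the end.
 */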
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
	struct nouveau_svm_fault_buffer *buffer =
		container_of(notify, typeof(*buffer), notify);
	struct nouveau_svm *svm =
		container_of(buffer, typeof(*svm), buffer[buffer->id]);
	struct nvif_object *device = &svm->drm->client.device.object;
	struct nouveau_svmm *svmm;
	struct {
		struct nouveau_pfnmap_args i;
		u64 phys[1];
	} args;
	unsigned long hmm_flags;
	u64 inst, start, limit;
	int fi, fn;
	int replay = 0, atomic = 0, ret;

	/* Parse available fault buffer entries into a cache, and update
	 * the GET pointer so HW can reuse the entries.
	 */
	SVM_DBG(svm, "fault handler");
	if (buffer->get == buffer->put) {
		buffer->put = nvif_rd32(device, buffer->putaddr);
		buffer->get = nvif_rd32(device, buffer->getaddr);
		if (buffer->get == buffer->put)
			return NVIF_NOTIFY_KEEP;
	}
	buffer->fault_nr = 0;

	SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
	while (buffer->get != buffer->put) {
		nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
		if (++buffer->get == buffer->entries)
			buffer->get = 0;
	}
	nvif_wr32(device, buffer->getaddr, buffer->get);
	SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);

	/* Sort parsed faults by instance pointer to prevent unnecessary
	 * instance to SVMM translations, followed by address and access
	 * type to reduce the amount of work when handling the faults.
	 */
	sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
	     nouveau_svm_fault_cmp, NULL);

	/* Lookup SVMM structure for each unique instance pointer. */
	mutex_lock(&svm->mutex);
	for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
		if (!svmm || buffer->fault[fi]->inst != inst) {
			struct nouveau_ivmm *ivmm =
				nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
			svmm = ivmm ? ivmm->svmm : NULL;
			inst = buffer->fault[fi]->inst;
			SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
		}
		buffer->fault[fi]->svmm = svmm;
	}
	mutex_unlock(&svm->mutex);

	/* Process list of faults. */
	args.i.i.version = 0;
	args.i.i.type = NVIF_IOCTL_V0_MTHD;
	args.i.m.version = 0;
	args.i.m.method = NVIF_VMM_V0_PFNMAP;
	args.i.p.version = 0;

	for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
		struct svm_notifier notifier;
		struct mm_struct *mm;

		/* Cancel any faults from non-SVM channels. */
		if (!(svmm = buffer->fault[fi]->svmm)) {
			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
			continue;
		}
		SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);

		/* We try to group handling of faults within a small
		 * window into a single update.
		 */
		start = buffer->fault[fi]->addr;
		limit = start + PAGE_SIZE;
		if (start < svmm->unmanaged.limit)
			limit = min_t(u64, limit, svmm->unmanaged.start);

		/*
		 * Prepare the GPU-side update of all pages within the
		 * fault window, determining required pages and access
		 * permissions based on pending faults.
		 */
		args.i.p.addr = start;
		args.i.p.page = PAGE_SHIFT;
		args.i.p.size = PAGE_SIZE;
		/*
		 * Determine required permissions based on GPU fault
		 * access flags.
		 */
		switch (buffer->fault[fi]->access) {
		case 0: /* READ. */
			hmm_flags = HMM_PFN_REQ_FAULT;
			break;
		case 2: /* ATOMIC. */
			atomic = true;
			break;
		case 3: /* PREFETCH. */
			hmm_flags = 0;
			break;
		default:
			hmm_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
			break;
		}

		mm = svmm->notifier.mm;
		if (!mmget_not_zero(mm)) {
			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
			continue;
		}

		notifier.svmm = svmm;
		if (atomic)
			ret = nouveau_atomic_range_fault(svmm, svm->drm,
							 &args.i, sizeof(args),
							 &notifier);
		else
			ret = nouveau_range_fault(svmm, svm->drm, &args.i,
						  sizeof(args), hmm_flags,
						  &notifier);
		mmput(mm);

		limit = args.i.p.addr + args.i.p.size;
		for (fn = fi; ++fn < buffer->fault_nr; ) {
			/* It's okay to skip over duplicate addresses from the
			 * same SVMM as faults are ordered by access type such
			 * that only the first one needs to be handled.
			 *
			 * ie. WRITE faults appear first, thus any handling of
			 * pending READ faults will already be satisfied.
			 * But if a large page is mapped, make sure subsequent
			 * fault addresses have sufficient access permission.
			 */
			if (buffer->fault[fn]->svmm != svmm ||
			    buffer->fault[fn]->addr >= limit ||
			    (buffer->fault[fi]->access == FAULT_ACCESS_READ &&
			     !(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
			    (buffer->fault[fi]->access != FAULT_ACCESS_READ &&
			     buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
			     !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
			    (buffer->fault[fi]->access != FAULT_ACCESS_READ &&
			     buffer->fault[fi]->access != FAULT_ACCESS_WRITE &&
			     buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
			     !(args.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
				break;
		}

		/* If handling failed completely, cancel all faults. */
		if (ret) {
			while (fi < fn) {
				struct nouveau_svm_fault *fault =
					buffer->fault[fi++];

				nouveau_svm_fault_cancel_fault(svm, fault);
			}
		} else
			replay++;
	}

	/* Issue fault replay to the GPU. */
	if (replay)
		nouveau_svm_fault_replay(svm);
	return NVIF_NOTIFY_KEEP;
}

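/* The nouveau_pfns_* helpers hand out the p.phys[] array embedded in a
 * nouveau_pfnmap_args allocation; container_of() recovers the full
 * argument block so it can be submitted with the PFNMAP method.
 */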
static struct nouveau_pfnmap_args *
nouveau_pfns_to_args(void *pfns)
{
	return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
}

u64 *
nouveau_pfns_alloc(unsigned long npages)
{
	struct nouveau_pfnmap_args *args;

	args = kzalloc(struct_size(args, p.phys, npages), GFP_KERNEL);
	if (!args)
		return NULL;

	args->i.type = NVIF_IOCTL_V0_MTHD;
	args->m.method = NVIF_VMM_V0_PFNMAP;
	args->p.page = PAGE_SHIFT;

	return args->p.phys;
}

void
nouveau_pfns_free(u64 *pfns)
{
	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);

	kfree(args);
}

void
nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
		 unsigned long addr, u64 *pfns, unsigned long npages)
{
	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
	int ret;

	args->p.addr = addr;
	args->p.size = npages << PAGE_SHIFT;

	mutex_lock(&svmm->mutex);

	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
				npages * sizeof(args->p.phys[0]), NULL);

	mutex_unlock(&svmm->mutex);
}

static void
nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	nvif_notify_put(&buffer->notify);
}

static int
nouveau_svm_fault_buffer_init(struct nouveau_svm *svm, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	struct nvif_object *device = &svm->drm->client.device.object;
	buffer->get = nvif_rd32(device, buffer->getaddr);
	buffer->put = nvif_rd32(device, buffer->putaddr);
	SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
	return nvif_notify_get(&buffer->notify);
}

static void
nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	int i;

	if (buffer->fault) {
		for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
			kfree(buffer->fault[i]);
		kvfree(buffer->fault);
	}

	nouveau_svm_fault_buffer_fini(svm, id);

	nvif_notify_dtor(&buffer->notify);
	nvif_object_dtor(&buffer->object);
}

static int
nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	struct nouveau_drm *drm = svm->drm;
	struct nvif_object *device = &drm->client.device.object;
	struct nvif_clb069_v0 args = {};
	int ret;

	buffer->id = id;

	ret = nvif_object_ctor(device, "svmFaultBuffer", 0, oclass, &args,
			       sizeof(args), &buffer->object);
	if (ret < 0) {
		SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
		return ret;
	}

	nvif_object_map(&buffer->object, NULL, 0);
	buffer->entries = args.entries;
	buffer->getaddr = args.get;
	buffer->putaddr = args.put;

	ret = nvif_notify_ctor(&buffer->object, "svmFault", nouveau_svm_fault,
			       true, NVB069_V0_NTFY_FAULT, NULL, 0, 0,
			       &buffer->notify);
	if (ret)
		return ret;

	buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL);
	if (!buffer->fault)
		return -ENOMEM;

	return nouveau_svm_fault_buffer_init(svm, id);
}

void
nouveau_svm_resume(struct nouveau_drm *drm)
{
	struct nouveau_svm *svm = drm->svm;
	if (svm)
		nouveau_svm_fault_buffer_init(svm, 0);
}

void
nouveau_svm_suspend(struct nouveau_drm *drm)
{
	struct nouveau_svm *svm = drm->svm;
	if (svm)
		nouveau_svm_fault_buffer_fini(svm, 0);
}

void
nouveau_svm_fini(struct nouveau_drm *drm)
{
	struct nouveau_svm *svm = drm->svm;
	if (svm) {
		nouveau_svm_fault_buffer_dtor(svm, 0);
		kfree(drm->svm);
		drm->svm = NULL;
	}
}

void
nouveau_svm_init(struct nouveau_drm *drm)
{
	static const struct nvif_mclass buffers[] = {
		{ VOLTA_FAULT_BUFFER_A, 0 },
		{ MAXWELL_FAULT_BUFFER_A, 0 },
		{}
	};
	struct nouveau_svm *svm;
	int ret;

	/* Disable on Volta and newer until channel recovery is fixed,
	 * otherwise clients will have a trivial way to trash the GPU
	 * for everyone.
	 */
	if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
		return;

	if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL)))
		return;

	drm->svm->drm = drm;
	mutex_init(&drm->svm->mutex);
	INIT_LIST_HEAD(&drm->svm->inst);

	ret = nvif_mclass(&drm->client.device.object, buffers);
	if (ret < 0) {
		SVM_DBG(svm, "No supported fault buffer class");
		nouveau_svm_fini(drm);
		return;
	}

	ret = nouveau_svm_fault_buffer_ctor(svm, buffers[ret].oclass, 0);
	if (ret) {
		nouveau_svm_fini(drm);
		return;
	}

	SVM_DBG(svm, "Initialised");
}
1068}