1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/mutex.h>
24#include <linux/log2.h>
25#include <linux/sched.h>
26#include <linux/sched/mm.h>
27#include <linux/sched/task.h>
28#include <linux/slab.h>
29#include <linux/amd-iommu.h>
30#include <linux/notifier.h>
31#include <linux/compat.h>
32#include <linux/mman.h>
33#include <linux/file.h>
34
35struct mm_struct;
36
37#include "kfd_priv.h"
38#include "kfd_device_queue_manager.h"
39#include "kfd_dbgmgr.h"
40#include "kfd_iommu.h"
41
42/*
43 * List of struct kfd_process (field kfd_process).
44 * Unique/indexed by mm_struct*
45 */
46DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
47static DEFINE_MUTEX(kfd_processes_mutex);
48
49DEFINE_SRCU(kfd_processes_srcu);
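/*
 * Lookups in kfd_processes_table are protected by kfd_processes_srcu;
 * insertions and removals are serialized by kfd_processes_mutex.
 */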
50
51/* For process termination handling */
52static struct workqueue_struct *kfd_process_wq;
53
54/* Ordered, single-threaded workqueue for restoring evicted
55 * processes. Restoring multiple processes concurrently under memory
56 * pressure can lead to processes blocking each other from validating
57 * their BOs and result in a live-lock situation where processes
58 * remain evicted indefinitely.
59 */
60static struct workqueue_struct *kfd_restore_wq;
61
62static struct kfd_process *find_process(const struct task_struct *thread);
63static void kfd_process_ref_release(struct kref *ref);
64static struct kfd_process *create_process(const struct task_struct *thread,
65 struct file *filep);
66
67static void evict_process_worker(struct work_struct *work);
68static void restore_process_worker(struct work_struct *work);
69
70
71int kfd_process_create_wq(void)
72{
73 if (!kfd_process_wq)
74 kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
75 if (!kfd_restore_wq)
76 kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);
77
78 if (!kfd_process_wq || !kfd_restore_wq) {
79 kfd_process_destroy_wq();
80 return -ENOMEM;
81 }
82
83 return 0;
84}
85
86void kfd_process_destroy_wq(void)
87{
88 if (kfd_process_wq) {
89 destroy_workqueue(kfd_process_wq);
90 kfd_process_wq = NULL;
91 }
92 if (kfd_restore_wq) {
93 destroy_workqueue(kfd_restore_wq);
94 kfd_restore_wq = NULL;
95 }
96}
97
98static void kfd_process_free_gpuvm(struct kgd_mem *mem,
99 struct kfd_process_device *pdd)
100{
101 struct kfd_dev *dev = pdd->dev;
102
103 dev->kfd2kgd->unmap_memory_to_gpu(dev->kgd, mem, pdd->vm);
104 dev->kfd2kgd->free_memory_of_gpu(dev->kgd, mem);
105}
106
107/* kfd_process_alloc_gpuvm - Allocate and map GPU memory for the KFD process
108 * This function should only be called right after the process
109 * is created and while kfd_processes_mutex is still held
110 * to avoid concurrency. Because of that exclusiveness, we do
111 * not need to take p->mutex.
112 */
113static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
114 uint64_t gpu_va, uint32_t size,
115 uint32_t flags, void **kptr)
116{
117 struct kfd_dev *kdev = pdd->dev;
118 struct kgd_mem *mem = NULL;
119 int handle;
120 int err;
121
122 err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
123 pdd->vm, &mem, NULL, flags);
124 if (err)
125 goto err_alloc_mem;
126
127 err = kdev->kfd2kgd->map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
128 if (err)
129 goto err_map_mem;
130
131 err = kdev->kfd2kgd->sync_memory(kdev->kgd, mem, true);
132 if (err) {
133 pr_debug("Sync memory failed, wait interrupted by user signal\n");
134 goto sync_memory_failed;
135 }
136
137 /* Create an obj handle so kfd_process_device_remove_obj_handle
138 * will take care of the bo removal when the process finishes.
139 * We do not need to take p->mutex, because the process is just
140 * created and the ioctls have not had the chance to run.
141 */
142 handle = kfd_process_device_create_obj_handle(pdd, mem);
143
144 if (handle < 0) {
145 err = handle;
146 goto free_gpuvm;
147 }
148
149 if (kptr) {
150 err = kdev->kfd2kgd->map_gtt_bo_to_kernel(kdev->kgd,
151 (struct kgd_mem *)mem, kptr, NULL);
152 if (err) {
153 pr_debug("Map GTT BO to kernel failed\n");
154 goto free_obj_handle;
155 }
156 }
157
158 return err;
159
160free_obj_handle:
161 kfd_process_device_remove_obj_handle(pdd, handle);
162free_gpuvm:
163sync_memory_failed:
164 kfd_process_free_gpuvm(mem, pdd);
165 return err;
166
167err_map_mem:
168 kdev->kfd2kgd->free_memory_of_gpu(kdev->kgd, mem);
169err_alloc_mem:
170 *kptr = NULL;
171 return err;
172}
173
174/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
175 * process for IB usage. The memory reserved is for KFD to submit
176 * IBs to AMDGPU from the kernel. If the memory is reserved
177 * successfully, ib_kaddr will have the CPU/kernel
178 * address. Check ib_kaddr before accessing the memory.
179 */
180static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
181{
182 struct qcm_process_device *qpd = &pdd->qpd;
183 uint32_t flags = ALLOC_MEM_FLAGS_GTT |
184 ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
185 ALLOC_MEM_FLAGS_WRITABLE |
186 ALLOC_MEM_FLAGS_EXECUTABLE;
187 void *kaddr;
188 int ret;
189
190 if (qpd->ib_kaddr || !qpd->ib_base)
191 return 0;
192
193 /* ib_base is only set for dGPU */
194 ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
195 &kaddr);
196 if (ret)
197 return ret;
198
199 qpd->ib_kaddr = kaddr;
200
201 return 0;
202}
203
204struct kfd_process *kfd_create_process(struct file *filep)
205{
206 struct kfd_process *process;
207 struct task_struct *thread = current;
208
209 if (!thread->mm)
210 return ERR_PTR(-EINVAL);
211
212 /* Only the pthreads threading model is supported. */
213 if (thread->group_leader->mm != thread->mm)
214 return ERR_PTR(-EINVAL);
215
216 /*
217	 * Take the kfd_processes_mutex before starting process creation
218 * so there won't be a case where two threads of the same process
219 * create two kfd_process structures
220 */
221 mutex_lock(&kfd_processes_mutex);
222
223 /* A prior open of /dev/kfd could have already created the process. */
224 process = find_process(thread);
225 if (process)
226 pr_debug("Process already found\n");
227 else
228 process = create_process(thread, filep);
229
230 mutex_unlock(&kfd_processes_mutex);
231
232 return process;
233}
234
235struct kfd_process *kfd_get_process(const struct task_struct *thread)
236{
237 struct kfd_process *process;
238
239 if (!thread->mm)
240 return ERR_PTR(-EINVAL);
241
242 /* Only the pthreads threading model is supported. */
243 if (thread->group_leader->mm != thread->mm)
244 return ERR_PTR(-EINVAL);
245
246 process = find_process(thread);
247
248 return process;
249}
250
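/* Look up a kfd_process by its mm_struct. Callers hold the
 * kfd_processes_srcu read lock (or kfd_processes_mutex) around the
 * lookup and while the returned pointer is used.
 */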
251static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
252{
253 struct kfd_process *process;
254
255 hash_for_each_possible_rcu(kfd_processes_table, process,
256 kfd_processes, (uintptr_t)mm)
257 if (process->mm == mm)
258 return process;
259
260 return NULL;
261}
262
263static struct kfd_process *find_process(const struct task_struct *thread)
264{
265 struct kfd_process *p;
266 int idx;
267
268 idx = srcu_read_lock(&kfd_processes_srcu);
269 p = find_process_by_mm(thread->mm);
270 srcu_read_unlock(&kfd_processes_srcu, idx);
271
272 return p;
273}
274
275void kfd_unref_process(struct kfd_process *p)
276{
277 kref_put(&p->ref, kfd_process_ref_release);
278}
279
280static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
281{
282 struct kfd_process *p = pdd->process;
283 void *mem;
284 int id;
285
286 /*
287	 * Remove all handles from the idr and release the corresponding
288	 * local memory objects
289 */
290 idr_for_each_entry(&pdd->alloc_idr, mem, id) {
291 struct kfd_process_device *peer_pdd;
292
293 list_for_each_entry(peer_pdd, &p->per_device_data,
294 per_device_list) {
295 if (!peer_pdd->vm)
296 continue;
297 peer_pdd->dev->kfd2kgd->unmap_memory_to_gpu(
298 peer_pdd->dev->kgd, mem, peer_pdd->vm);
299 }
300
301 pdd->dev->kfd2kgd->free_memory_of_gpu(pdd->dev->kgd, mem);
302 kfd_process_device_remove_obj_handle(pdd, id);
303 }
304}
305
306static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
307{
308 struct kfd_process_device *pdd;
309
310 list_for_each_entry(pdd, &p->per_device_data, per_device_list)
311 kfd_process_device_free_bos(pdd);
312}
313
314static void kfd_process_destroy_pdds(struct kfd_process *p)
315{
316 struct kfd_process_device *pdd, *temp;
317
318 list_for_each_entry_safe(pdd, temp, &p->per_device_data,
319 per_device_list) {
320 pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
321 pdd->dev->id, p->pasid);
322
323 if (pdd->drm_file)
324 fput(pdd->drm_file);
325 else if (pdd->vm)
326 pdd->dev->kfd2kgd->destroy_process_vm(
327 pdd->dev->kgd, pdd->vm);
328
329 list_del(&pdd->per_device_list);
330
331 if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
332 free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
333 get_order(KFD_CWSR_TBA_TMA_SIZE));
334
335 idr_destroy(&pdd->alloc_idr);
336
337 kfree(pdd);
338 }
339}
340
341/* No process locking is needed in this function, because the process
342 * is not findable any more. We must assume that no other thread is
343 * using it any more, otherwise we couldn't safely free the process
344 * structure in the end.
345 */
346static void kfd_process_wq_release(struct work_struct *work)
347{
348 struct kfd_process *p = container_of(work, struct kfd_process,
349 release_work);
350
351 kfd_iommu_unbind_process(p);
352
353 kfd_process_free_outstanding_kfd_bos(p);
354
355 kfd_process_destroy_pdds(p);
356 dma_fence_put(p->ef);
357
358 kfd_event_free_process(p);
359
360 kfd_pasid_free(p->pasid);
361 kfd_free_process_doorbells(p);
362
363 mutex_destroy(&p->mutex);
364
365 put_task_struct(p->lead_thread);
366
367 kfree(p);
368}
369
370static void kfd_process_ref_release(struct kref *ref)
371{
372 struct kfd_process *p = container_of(ref, struct kfd_process, ref);
373
374 INIT_WORK(&p->release_work, kfd_process_wq_release);
375 queue_work(kfd_process_wq, &p->release_work);
376}
377
378static void kfd_process_destroy_delayed(struct rcu_head *rcu)
379{
380 struct kfd_process *p = container_of(rcu, struct kfd_process, rcu);
381
382 kfd_unref_process(p);
383}
384
385static void kfd_process_notifier_release(struct mmu_notifier *mn,
386 struct mm_struct *mm)
387{
388 struct kfd_process *p;
389 struct kfd_process_device *pdd = NULL;
390
391 /*
392	 * The kfd_process structure cannot be freed because the
393 * mmu_notifier srcu is read locked
394 */
395 p = container_of(mn, struct kfd_process, mmu_notifier);
396 if (WARN_ON(p->mm != mm))
397 return;
398
399 mutex_lock(&kfd_processes_mutex);
400 hash_del_rcu(&p->kfd_processes);
401 mutex_unlock(&kfd_processes_mutex);
402 synchronize_srcu(&kfd_processes_srcu);
403
404 cancel_delayed_work_sync(&p->eviction_work);
405 cancel_delayed_work_sync(&p->restore_work);
406
407 mutex_lock(&p->mutex);
408
409	/* Iterate over all process device data structures; if a
410	 * pdd is in debug mode, force unregistration first so that
411	 * the queues can then be destroyed
412 */
413 list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
414 struct kfd_dev *dev = pdd->dev;
415
416 mutex_lock(kfd_get_dbgmgr_mutex());
417 if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
418 if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
419 kfd_dbgmgr_destroy(dev->dbgmgr);
420 dev->dbgmgr = NULL;
421 }
422 }
423 mutex_unlock(kfd_get_dbgmgr_mutex());
424 }
425
426 kfd_process_dequeue_from_all_devices(p);
427 pqm_uninit(&p->pqm);
428
429 /* Indicate to other users that MM is no longer valid */
430 p->mm = NULL;
431
432 mutex_unlock(&p->mutex);
433
434 mmu_notifier_unregister_no_release(&p->mmu_notifier, mm);
435 mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
436}
437
438static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
439 .release = kfd_process_notifier_release,
440};
441
442static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
443{
444 unsigned long offset;
445 struct kfd_process_device *pdd;
446
447 list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
448 struct kfd_dev *dev = pdd->dev;
449 struct qcm_process_device *qpd = &pdd->qpd;
450
451 if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
452 continue;
453
454 offset = (dev->id | KFD_MMAP_RESERVED_MEM_MASK) << PAGE_SHIFT;
455 qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
456 KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
457 MAP_SHARED, offset);
458
459 if (IS_ERR_VALUE(qpd->tba_addr)) {
460 int err = qpd->tba_addr;
461
462 pr_err("Failure to set tba address. error %d.\n", err);
463 qpd->tba_addr = 0;
464 qpd->cwsr_kaddr = NULL;
465 return err;
466 }
467
468 memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
469
470 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
471 pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
472 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
473 }
474
475 return 0;
476}
477
478static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
479{
480 struct kfd_dev *dev = pdd->dev;
481 struct qcm_process_device *qpd = &pdd->qpd;
482 uint32_t flags = ALLOC_MEM_FLAGS_GTT |
483 ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
484 void *kaddr;
485 int ret;
486
487 if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
488 return 0;
489
490 /* cwsr_base is only set for dGPU */
491 ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
492 KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
493 if (ret)
494 return ret;
495
496 qpd->cwsr_kaddr = kaddr;
497 qpd->tba_addr = qpd->cwsr_base;
498
499 memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
500
501 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
502 pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
503 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
504
505 return 0;
506}
507
508static struct kfd_process *create_process(const struct task_struct *thread,
509 struct file *filep)
510{
511 struct kfd_process *process;
512 int err = -ENOMEM;
513
514 process = kzalloc(sizeof(*process), GFP_KERNEL);
515
516 if (!process)
517 goto err_alloc_process;
518
519 process->pasid = kfd_pasid_alloc();
520 if (process->pasid == 0)
521 goto err_alloc_pasid;
522
523 if (kfd_alloc_process_doorbells(process) < 0)
524 goto err_alloc_doorbells;
525
526 kref_init(&process->ref);
527
528 mutex_init(&process->mutex);
529
530 process->mm = thread->mm;
531
532 /* register notifier */
533 process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
534 err = mmu_notifier_register(&process->mmu_notifier, process->mm);
535 if (err)
536 goto err_mmu_notifier;
537
538 hash_add_rcu(kfd_processes_table, &process->kfd_processes,
539 (uintptr_t)process->mm);
540
541 process->lead_thread = thread->group_leader;
542 get_task_struct(process->lead_thread);
543
544 INIT_LIST_HEAD(&process->per_device_data);
545
546 kfd_event_init_process(process);
547
548 err = pqm_init(&process->pqm, process);
549 if (err != 0)
550 goto err_process_pqm_init;
551
552	/* Init process apertures */
553 process->is_32bit_user_mode = in_compat_syscall();
554 err = kfd_init_apertures(process);
555 if (err != 0)
556 goto err_init_apertures;
557
558 INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
559 INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
560 process->last_restore_timestamp = get_jiffies_64();
561
562 err = kfd_process_init_cwsr_apu(process, filep);
563 if (err)
564 goto err_init_cwsr;
565
566 return process;
567
568err_init_cwsr:
569 kfd_process_free_outstanding_kfd_bos(process);
570 kfd_process_destroy_pdds(process);
571err_init_apertures:
572 pqm_uninit(&process->pqm);
573err_process_pqm_init:
574 hash_del_rcu(&process->kfd_processes);
575 synchronize_rcu();
576 mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
577err_mmu_notifier:
578 mutex_destroy(&process->mutex);
579 kfd_free_process_doorbells(process);
580err_alloc_doorbells:
581 kfd_pasid_free(process->pasid);
582err_alloc_pasid:
583 kfree(process);
584err_alloc_process:
585 return ERR_PTR(err);
586}
587
588struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
589 struct kfd_process *p)
590{
591 struct kfd_process_device *pdd = NULL;
592
593 list_for_each_entry(pdd, &p->per_device_data, per_device_list)
594 if (pdd->dev == dev)
595 return pdd;
596
597 return NULL;
598}
599
600struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
601 struct kfd_process *p)
602{
603 struct kfd_process_device *pdd = NULL;
604
605 pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
606 if (!pdd)
607 return NULL;
608
609 pdd->dev = dev;
610 INIT_LIST_HEAD(&pdd->qpd.queues_list);
611 INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
612 pdd->qpd.dqm = dev->dqm;
613 pdd->qpd.pqm = &p->pqm;
614 pdd->qpd.evicted = 0;
615 pdd->process = p;
616 pdd->bound = PDD_UNBOUND;
617 pdd->already_dequeued = false;
618 list_add(&pdd->per_device_list, &p->per_device_data);
619
620 /* Init idr used for memory handle translation */
621 idr_init(&pdd->alloc_idr);
622
623 return pdd;
624}
625
626/**
627 * kfd_process_device_init_vm - Initialize a VM for a process-device
628 *
629 * @pdd: The process-device
630 * @drm_file: Optional pointer to a DRM file descriptor
631 *
632 * If @drm_file is specified, it will be used to acquire the VM from
633 * that file descriptor. If successful, the @pdd takes ownership of
634 * the file descriptor.
635 *
636 * If @drm_file is NULL, a new VM is created.
637 *
638 * Returns 0 on success, -errno on failure.
639 */
640int kfd_process_device_init_vm(struct kfd_process_device *pdd,
641 struct file *drm_file)
642{
643 struct kfd_process *p;
644 struct kfd_dev *dev;
645 int ret;
646
647 if (pdd->vm)
648 return drm_file ? -EBUSY : 0;
649
650 p = pdd->process;
651 dev = pdd->dev;
652
653 if (drm_file)
654 ret = dev->kfd2kgd->acquire_process_vm(
655 dev->kgd, drm_file,
656 &pdd->vm, &p->kgd_process_info, &p->ef);
657 else
658 ret = dev->kfd2kgd->create_process_vm(
659 dev->kgd, &pdd->vm, &p->kgd_process_info, &p->ef);
660 if (ret) {
661 pr_err("Failed to create process VM object\n");
662 return ret;
663 }
664
665 ret = kfd_process_device_reserve_ib_mem(pdd);
666 if (ret)
667 goto err_reserve_ib_mem;
668 ret = kfd_process_device_init_cwsr_dgpu(pdd);
669 if (ret)
670 goto err_init_cwsr;
671
672 pdd->drm_file = drm_file;
673
674 return 0;
675
676err_init_cwsr:
677err_reserve_ib_mem:
678 kfd_process_device_free_bos(pdd);
679 if (!drm_file)
680 dev->kfd2kgd->destroy_process_vm(dev->kgd, pdd->vm);
681 pdd->vm = NULL;
682
683 return ret;
684}
685
686/*
687 * Direct the IOMMU to bind the process (specifically the pasid->mm)
688 * to the device.
689 * Unbinding occurs when the process dies or the device is removed.
690 *
691 * Assumes that the process lock is held.
692 */
693struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
694 struct kfd_process *p)
695{
696 struct kfd_process_device *pdd;
697 int err;
698
699 pdd = kfd_get_process_device_data(dev, p);
700 if (!pdd) {
701 pr_err("Process device data doesn't exist\n");
702 return ERR_PTR(-ENOMEM);
703 }
704
705 err = kfd_iommu_bind_process_to_device(pdd);
706 if (err)
707 return ERR_PTR(err);
708
709 err = kfd_process_device_init_vm(pdd, NULL);
710 if (err)
711 return ERR_PTR(err);
712
713 return pdd;
714}
715
716struct kfd_process_device *kfd_get_first_process_device_data(
717 struct kfd_process *p)
718{
719 return list_first_entry(&p->per_device_data,
720 struct kfd_process_device,
721 per_device_list);
722}
723
724struct kfd_process_device *kfd_get_next_process_device_data(
725 struct kfd_process *p,
726 struct kfd_process_device *pdd)
727{
728 if (list_is_last(&pdd->per_device_list, &p->per_device_data))
729 return NULL;
730 return list_next_entry(pdd, per_device_list);
731}
732
733bool kfd_has_process_device_data(struct kfd_process *p)
734{
735 return !(list_empty(&p->per_device_data));
736}
737
738/* Create specific handle mapped to mem from process local memory idr
739 * Assumes that the process lock is held.
740 */
741int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
742 void *mem)
743{
744 return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
745}
746
747/* Translate specific handle from process local memory idr
748 * Assumes that the process lock is held.
749 */
750void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
751 int handle)
752{
753 if (handle < 0)
754 return NULL;
755
756 return idr_find(&pdd->alloc_idr, handle);
757}
758
759/* Remove specific handle from process local memory idr
760 * Assumes that the process lock is held.
761 */
762void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
763 int handle)
764{
765 if (handle >= 0)
766 idr_remove(&pdd->alloc_idr, handle);
767}
768
769/* This increments the process->ref counter. */
770struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
771{
772 struct kfd_process *p, *ret_p = NULL;
773 unsigned int temp;
774
775 int idx = srcu_read_lock(&kfd_processes_srcu);
776
777 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
778 if (p->pasid == pasid) {
779 kref_get(&p->ref);
780 ret_p = p;
781 break;
782 }
783 }
784
785 srcu_read_unlock(&kfd_processes_srcu, idx);
786
787 return ret_p;
788}
789
790/* This increments the process->ref counter. */
791struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
792{
793 struct kfd_process *p;
794
795 int idx = srcu_read_lock(&kfd_processes_srcu);
796
797 p = find_process_by_mm(mm);
798 if (p)
799 kref_get(&p->ref);
800
801 srcu_read_unlock(&kfd_processes_srcu, idx);
802
803 return p;
804}
805
806/* process_evict_queues - Evict all user queues of a process
807 *
808 * Eviction is reference-counted per process-device. This means multiple
809 * evictions from different sources can be nested safely.
810 */
811static int process_evict_queues(struct kfd_process *p)
812{
813 struct kfd_process_device *pdd;
814 int r = 0;
815 unsigned int n_evicted = 0;
816
817 list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
818 r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
819 &pdd->qpd);
820 if (r) {
821 pr_err("Failed to evict process queues\n");
822 goto fail;
823 }
824 n_evicted++;
825 }
826
827 return r;
828
829fail:
830 /* To keep state consistent, roll back partial eviction by
831 * restoring queues
832 */
833 list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
834 if (n_evicted == 0)
835 break;
836 if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
837 &pdd->qpd))
838 pr_err("Failed to restore queues\n");
839
840 n_evicted--;
841 }
842
843 return r;
844}
845
846/* process_restore_queues - Restore all user queues of a process */
847static int process_restore_queues(struct kfd_process *p)
848{
849 struct kfd_process_device *pdd;
850 int r, ret = 0;
851
852 list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
853 r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
854 &pdd->qpd);
855 if (r) {
856 pr_err("Failed to restore process queues\n");
857 if (!ret)
858 ret = r;
859 }
860 }
861
862 return ret;
863}
864
865static void evict_process_worker(struct work_struct *work)
866{
867 int ret;
868 struct kfd_process *p;
869 struct delayed_work *dwork;
870
871 dwork = to_delayed_work(work);
872
873 /* Process termination destroys this worker thread. So during the
874 * lifetime of this thread, kfd_process p will be valid
875 */
876 p = container_of(dwork, struct kfd_process, eviction_work);
877 WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
878 "Eviction fence mismatch\n");
879
880	/* A narrow window of overlap between the restore and evict work
881	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
882	 * unreserves KFD BOs, the process can be evicted again, but
883	 * restore still has a few more steps to finish. So wait for any
884	 * previous restore work to complete
885 */
886 flush_delayed_work(&p->restore_work);
887
888 pr_debug("Started evicting pasid %d\n", p->pasid);
889 ret = process_evict_queues(p);
890 if (!ret) {
891 dma_fence_signal(p->ef);
892 dma_fence_put(p->ef);
893 p->ef = NULL;
894 queue_delayed_work(kfd_restore_wq, &p->restore_work,
895 msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
896
897 pr_debug("Finished evicting pasid %d\n", p->pasid);
898 } else
899 pr_err("Failed to evict queues of pasid %d\n", p->pasid);
900}
901
902static void restore_process_worker(struct work_struct *work)
903{
904 struct delayed_work *dwork;
905 struct kfd_process *p;
906 struct kfd_process_device *pdd;
907 int ret = 0;
908
909 dwork = to_delayed_work(work);
910
911 /* Process termination destroys this worker thread. So during the
912 * lifetime of this thread, kfd_process p will be valid
913 */
914 p = container_of(dwork, struct kfd_process, restore_work);
915
916 /* Call restore_process_bos on the first KGD device. This function
917 * takes care of restoring the whole process including other devices.
918	 * Restore can fail if not enough memory is available. If so,
919	 * reschedule and try again.
920 */
921 pdd = list_first_entry(&p->per_device_data,
922 struct kfd_process_device,
923 per_device_list);
924
925 pr_debug("Started restoring pasid %d\n", p->pasid);
926
927	/* Set last_restore_timestamp before the restore has actually succeeded.
928 * Otherwise this would have to be set by KGD (restore_process_bos)
929 * before KFD BOs are unreserved. If not, the process can be evicted
930 * again before the timestamp is set.
931 * If restore fails, the timestamp will be set again in the next
932	 * attempt. This would mean that the minimum GPU quantum would be
933 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
934 * functions)
935 */
936
937 p->last_restore_timestamp = get_jiffies_64();
938 ret = pdd->dev->kfd2kgd->restore_process_bos(p->kgd_process_info,
939 &p->ef);
940 if (ret) {
941 pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
942 p->pasid, PROCESS_BACK_OFF_TIME_MS);
943 ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
944 msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
945 WARN(!ret, "reschedule restore work failed\n");
946 return;
947 }
948
949 ret = process_restore_queues(p);
950 if (!ret)
951 pr_debug("Finished restoring pasid %d\n", p->pasid);
952 else
953 pr_err("Failed to restore queues of pasid %d\n", p->pasid);
954}
955
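/* Evict the queues of every known process and signal/release their
 * eviction fences so nothing is left waiting on them.
 */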
956void kfd_suspend_all_processes(void)
957{
958 struct kfd_process *p;
959 unsigned int temp;
960 int idx = srcu_read_lock(&kfd_processes_srcu);
961
962 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
963 cancel_delayed_work_sync(&p->eviction_work);
964 cancel_delayed_work_sync(&p->restore_work);
965
966 if (process_evict_queues(p))
967 pr_err("Failed to suspend process %d\n", p->pasid);
968 dma_fence_signal(p->ef);
969 dma_fence_put(p->ef);
970 p->ef = NULL;
971 }
972 srcu_read_unlock(&kfd_processes_srcu, idx);
973}
974
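/* Queue restore work for every known process. Returns -EFAULT if the
 * restore work of any process could not be queued.
 */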
975int kfd_resume_all_processes(void)
976{
977 struct kfd_process *p;
978 unsigned int temp;
979 int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
980
981 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
982 if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
983 pr_err("Restore process %d failed during resume\n",
984 p->pasid);
985 ret = -EFAULT;
986 }
987 }
988 srcu_read_unlock(&kfd_processes_srcu, idx);
989 return ret;
990}
991
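/* kfd_reserved_mem_mmap - Map the per-process CWSR trap handler pages
 * (TBA/TMA) into user space: allocate the backing pages and remap them
 * into the given VMA.
 */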
992int kfd_reserved_mem_mmap(struct kfd_process *process,
993 struct vm_area_struct *vma)
994{
995 struct kfd_dev *dev = kfd_device_by_id(vma->vm_pgoff);
996 struct kfd_process_device *pdd;
997 struct qcm_process_device *qpd;
998
999 if (!dev)
1000 return -EINVAL;
1001 if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
1002 pr_err("Incorrect CWSR mapping size.\n");
1003 return -EINVAL;
1004 }
1005
1006 pdd = kfd_get_process_device_data(dev, process);
1007 if (!pdd)
1008 return -EINVAL;
1009 qpd = &pdd->qpd;
1010
1011 qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1012 get_order(KFD_CWSR_TBA_TMA_SIZE));
1013 if (!qpd->cwsr_kaddr) {
1014 pr_err("Error allocating per process CWSR buffer.\n");
1015 return -ENOMEM;
1016 }
1017
1018 vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
1019 | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
1020 /* Mapping pages to user process */
1021 return remap_pfn_range(vma, vma->vm_start,
1022 PFN_DOWN(__pa(qpd->cwsr_kaddr)),
1023 KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
1024}
1025
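/* kfd_flush_tlb - Invalidate GPU TLB entries for this process-device.
 * Without HWS the flush is done per VMID (and only once a VMID has
 * been assigned); with HWS it is done per PASID.
 */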
1026void kfd_flush_tlb(struct kfd_process_device *pdd)
1027{
1028 struct kfd_dev *dev = pdd->dev;
1029 const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
1030
1031 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
1032 /* Nothing to flush until a VMID is assigned, which
1033 * only happens when the first queue is created.
1034 */
1035 if (pdd->qpd.vmid)
1036 f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
1037 } else {
1038 f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
1039 }
1040}
1041
1042#if defined(CONFIG_DEBUG_FS)
1043
1044int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
1045{
1046 struct kfd_process *p;
1047 unsigned int temp;
1048 int r = 0;
1049
1050 int idx = srcu_read_lock(&kfd_processes_srcu);
1051
1052 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1053 seq_printf(m, "Process %d PASID %d:\n",
1054 p->lead_thread->tgid, p->pasid);
1055
1056 mutex_lock(&p->mutex);
1057 r = pqm_debugfs_mqds(m, &p->pqm);
1058 mutex_unlock(&p->mutex);
1059
1060 if (r)
1061 break;
1062 }
1063
1064 srcu_read_unlock(&kfd_processes_srcu, idx);
1065
1066 return r;
1067}
1068
1069#endif
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/mutex.h>
24#include <linux/log2.h>
25#include <linux/sched.h>
26#include <linux/sched/mm.h>
27#include <linux/sched/task.h>
28#include <linux/mmu_context.h>
29#include <linux/slab.h>
30#include <linux/amd-iommu.h>
31#include <linux/notifier.h>
32#include <linux/compat.h>
33#include <linux/mman.h>
34#include <linux/file.h>
35#include <linux/pm_runtime.h>
36#include "amdgpu_amdkfd.h"
37#include "amdgpu.h"
38
39struct mm_struct;
40
41#include "kfd_priv.h"
42#include "kfd_device_queue_manager.h"
43#include "kfd_dbgmgr.h"
44#include "kfd_iommu.h"
45
46/*
47 * List of struct kfd_process (field kfd_process).
48 * Unique/indexed by mm_struct*
49 */
50DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
51static DEFINE_MUTEX(kfd_processes_mutex);
52
53DEFINE_SRCU(kfd_processes_srcu);
54
55/* For process termination handling */
56static struct workqueue_struct *kfd_process_wq;
57
58/* Ordered, single-threaded workqueue for restoring evicted
59 * processes. Restoring multiple processes concurrently under memory
60 * pressure can lead to processes blocking each other from validating
61 * their BOs and result in a live-lock situation where processes
62 * remain evicted indefinitely.
63 */
64static struct workqueue_struct *kfd_restore_wq;
65
66static struct kfd_process *find_process(const struct task_struct *thread);
67static void kfd_process_ref_release(struct kref *ref);
68static struct kfd_process *create_process(const struct task_struct *thread);
69static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);
70
71static void evict_process_worker(struct work_struct *work);
72static void restore_process_worker(struct work_struct *work);
73
74struct kfd_procfs_tree {
75 struct kobject *kobj;
76};
77
78static struct kfd_procfs_tree procfs;
79
80/*
81 * Structure for SDMA activity tracking
82 */
83struct kfd_sdma_activity_handler_workarea {
84 struct work_struct sdma_activity_work;
85 struct kfd_process_device *pdd;
86 uint64_t sdma_activity_counter;
87};
88
89struct temp_sdma_queue_list {
90 uint64_t rptr;
91 uint64_t sdma_val;
92 unsigned int queue_id;
93 struct list_head list;
94};
95
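/* Worker that totals the SDMA activity of a process-device: per-queue
 * counters read from user space plus the past activity already
 * accumulated in the pdd.
 */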
96static void kfd_sdma_activity_worker(struct work_struct *work)
97{
98 struct kfd_sdma_activity_handler_workarea *workarea;
99 struct kfd_process_device *pdd;
100 uint64_t val;
101 struct mm_struct *mm;
102 struct queue *q;
103 struct qcm_process_device *qpd;
104 struct device_queue_manager *dqm;
105 int ret = 0;
106 struct temp_sdma_queue_list sdma_q_list;
107 struct temp_sdma_queue_list *sdma_q, *next;
108
109 workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
110 sdma_activity_work);
111 if (!workarea)
112 return;
113
114 pdd = workarea->pdd;
115 if (!pdd)
116 return;
117 dqm = pdd->dev->dqm;
118 qpd = &pdd->qpd;
119 if (!dqm || !qpd)
120 return;
121 /*
122 * Total SDMA activity is current SDMA activity + past SDMA activity
123 * Past SDMA count is stored in pdd.
124 * To get the current activity counters for all active SDMA queues,
125 * we loop over all SDMA queues and get their counts from user-space.
126 *
127 * We cannot call get_user() with dqm_lock held as it can cause
128 * a circular lock dependency situation. To read the SDMA stats,
129 * we need to do the following:
130 *
131 * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list,
132 * with dqm_lock/dqm_unlock().
133	 * 2. Call get_user() for each node in the temporary list without dqm_lock.
134	 * Save the SDMA count for each node and also add the count to the total
135	 * SDMA count.
136	 * It is possible that, during this step, a few SDMA queue nodes get deleted
137 * from the qpd->queues_list.
138 * 3. Do a second pass over qpd->queues_list to check if any nodes got deleted.
139 * If any node got deleted, its SDMA count would be captured in the sdma
140 * past activity counter. So subtract the SDMA counter stored in step 2
141 * for this node from the total SDMA count.
142 */
143 INIT_LIST_HEAD(&sdma_q_list.list);
144
145 /*
146 * Create the temp list of all SDMA queues
147 */
148 dqm_lock(dqm);
149
150 list_for_each_entry(q, &qpd->queues_list, list) {
151 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
152 (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
153 continue;
154
155 sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
156 if (!sdma_q) {
157 dqm_unlock(dqm);
158 goto cleanup;
159 }
160
161 INIT_LIST_HEAD(&sdma_q->list);
162 sdma_q->rptr = (uint64_t)q->properties.read_ptr;
163 sdma_q->queue_id = q->properties.queue_id;
164 list_add_tail(&sdma_q->list, &sdma_q_list.list);
165 }
166
167 /*
168	 * If the temp list is empty, then no SDMA queue nodes were found in
169	 * qpd->queues_list. Return the past activity count as the total SDMA
170 * count
171 */
172 if (list_empty(&sdma_q_list.list)) {
173 workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
174 dqm_unlock(dqm);
175 return;
176 }
177
178 dqm_unlock(dqm);
179
180 /*
181 * Get the usage count for each SDMA queue in temp_list.
182 */
183 mm = get_task_mm(pdd->process->lead_thread);
184 if (!mm)
185 goto cleanup;
186
187 kthread_use_mm(mm);
188
189 list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
190 val = 0;
191 ret = read_sdma_queue_counter(sdma_q->rptr, &val);
192 if (ret) {
193 pr_debug("Failed to read SDMA queue active counter for queue id: %d",
194 sdma_q->queue_id);
195 } else {
196 sdma_q->sdma_val = val;
197 workarea->sdma_activity_counter += val;
198 }
199 }
200
201 kthread_unuse_mm(mm);
202 mmput(mm);
203
204 /*
205	 * Do a second iteration over qpd->queues_list to check if any SDMA
206	 * nodes got deleted while fetching the SDMA counters.
207 */
208 dqm_lock(dqm);
209
210 workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;
211
212 list_for_each_entry(q, &qpd->queues_list, list) {
213 if (list_empty(&sdma_q_list.list))
214 break;
215
216 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
217 (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
218 continue;
219
220 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
221 if (((uint64_t)q->properties.read_ptr == sdma_q->rptr) &&
222 (sdma_q->queue_id == q->properties.queue_id)) {
223 list_del(&sdma_q->list);
224 kfree(sdma_q);
225 break;
226 }
227 }
228 }
229
230 dqm_unlock(dqm);
231
232 /*
233	 * If the temp list is not empty, it implies some queues were deleted
234	 * from qpd->queues_list while reading SDMA usage. Subtract the SDMA
235 * count for each node from the total SDMA count.
236 */
237 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
238 workarea->sdma_activity_counter -= sdma_q->sdma_val;
239 list_del(&sdma_q->list);
240 kfree(sdma_q);
241 }
242
243 return;
244
245cleanup:
246 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
247 list_del(&sdma_q->list);
248 kfree(sdma_q);
249 }
250}
251
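/* sysfs show handler for the per-process files: "pasid" reports the
 * process PASID, "vram_<gpuid>" the VRAM usage counter and
 * "sdma_<gpuid>" the accumulated SDMA activity.
 */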
252static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
253 char *buffer)
254{
255 if (strcmp(attr->name, "pasid") == 0) {
256 struct kfd_process *p = container_of(attr, struct kfd_process,
257 attr_pasid);
258
259 return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
260 } else if (strncmp(attr->name, "vram_", 5) == 0) {
261 struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
262 attr_vram);
263 return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
264 } else if (strncmp(attr->name, "sdma_", 5) == 0) {
265 struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
266 attr_sdma);
267 struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;
268
269 INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
270 kfd_sdma_activity_worker);
271
272 sdma_activity_work_handler.pdd = pdd;
273
274 schedule_work(&sdma_activity_work_handler.sdma_activity_work);
275
276 flush_work(&sdma_activity_work_handler.sdma_activity_work);
277
278 return snprintf(buffer, PAGE_SIZE, "%llu\n",
279 (sdma_activity_work_handler.sdma_activity_counter)/
280 SDMA_ACTIVITY_DIVISOR);
281 } else {
282 pr_err("Invalid attribute");
283 return -EINVAL;
284 }
285
286 return 0;
287}
288
289static void kfd_procfs_kobj_release(struct kobject *kobj)
290{
291 kfree(kobj);
292}
293
294static const struct sysfs_ops kfd_procfs_ops = {
295 .show = kfd_procfs_show,
296};
297
298static struct kobj_type procfs_type = {
299 .release = kfd_procfs_kobj_release,
300 .sysfs_ops = &kfd_procfs_ops,
301};
302
303void kfd_procfs_init(void)
304{
305 int ret = 0;
306
307 procfs.kobj = kfd_alloc_struct(procfs.kobj);
308 if (!procfs.kobj)
309 return;
310
311 ret = kobject_init_and_add(procfs.kobj, &procfs_type,
312 &kfd_device->kobj, "proc");
313 if (ret) {
314 pr_warn("Could not create procfs proc folder");
315 /* If we fail to create the procfs, clean up */
316 kfd_procfs_shutdown();
317 }
318}
319
320void kfd_procfs_shutdown(void)
321{
322 if (procfs.kobj) {
323 kobject_del(procfs.kobj);
324 kobject_put(procfs.kobj);
325 procfs.kobj = NULL;
326 }
327}
328
329static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
330 struct attribute *attr, char *buffer)
331{
332 struct queue *q = container_of(kobj, struct queue, kobj);
333
334 if (!strcmp(attr->name, "size"))
335 return snprintf(buffer, PAGE_SIZE, "%llu",
336 q->properties.queue_size);
337 else if (!strcmp(attr->name, "type"))
338 return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
339 else if (!strcmp(attr->name, "gpuid"))
340 return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
341 else
342 pr_err("Invalid attribute");
343
344 return 0;
345}
346
347static struct attribute attr_queue_size = {
348 .name = "size",
349 .mode = KFD_SYSFS_FILE_MODE
350};
351
352static struct attribute attr_queue_type = {
353 .name = "type",
354 .mode = KFD_SYSFS_FILE_MODE
355};
356
357static struct attribute attr_queue_gpuid = {
358 .name = "gpuid",
359 .mode = KFD_SYSFS_FILE_MODE
360};
361
362static struct attribute *procfs_queue_attrs[] = {
363 &attr_queue_size,
364 &attr_queue_type,
365 &attr_queue_gpuid,
366 NULL
367};
368
369static const struct sysfs_ops procfs_queue_ops = {
370 .show = kfd_procfs_queue_show,
371};
372
373static struct kobj_type procfs_queue_type = {
374 .sysfs_ops = &procfs_queue_ops,
375 .default_attrs = procfs_queue_attrs,
376};
377
378int kfd_procfs_add_queue(struct queue *q)
379{
380 struct kfd_process *proc;
381 int ret;
382
383 if (!q || !q->process)
384 return -EINVAL;
385 proc = q->process;
386
387 /* Create proc/<pid>/queues/<queue id> folder */
388 if (!proc->kobj_queues)
389 return -EFAULT;
390 ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
391 proc->kobj_queues, "%u", q->properties.queue_id);
392 if (ret < 0) {
393 pr_warn("Creating proc/<pid>/queues/%u failed",
394 q->properties.queue_id);
395 kobject_put(&q->kobj);
396 return ret;
397 }
398
399 return 0;
400}
401
402static int kfd_sysfs_create_file(struct kfd_process *p, struct attribute *attr,
403 char *name)
404{
405 int ret = 0;
406
407 if (!p || !attr || !name)
408 return -EINVAL;
409
410 attr->name = name;
411 attr->mode = KFD_SYSFS_FILE_MODE;
412 sysfs_attr_init(attr);
413
414 ret = sysfs_create_file(p->kobj, attr);
415
416 return ret;
417}
418
419static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
420{
421 int ret = 0;
422 struct kfd_process_device *pdd;
423
424 if (!p)
425 return -EINVAL;
426
427 if (!p->kobj)
428 return -EFAULT;
429
430 /*
431 * Create sysfs files for each GPU:
432 * - proc/<pid>/vram_<gpuid>
433 * - proc/<pid>/sdma_<gpuid>
434 */
435 list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
436 snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
437 pdd->dev->id);
438 ret = kfd_sysfs_create_file(p, &pdd->attr_vram, pdd->vram_filename);
439 if (ret)
440 pr_warn("Creating vram usage for gpu id %d failed",
441 (int)pdd->dev->id);
442
443 snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
444 pdd->dev->id);
445 ret = kfd_sysfs_create_file(p, &pdd->attr_sdma, pdd->sdma_filename);
446 if (ret)
447 pr_warn("Creating sdma usage for gpu id %d failed",
448 (int)pdd->dev->id);
449 }
450
451 return ret;
452}
453
454
455void kfd_procfs_del_queue(struct queue *q)
456{
457 if (!q)
458 return;
459
460 kobject_del(&q->kobj);
461 kobject_put(&q->kobj);
462}
463
464int kfd_process_create_wq(void)
465{
466 if (!kfd_process_wq)
467 kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
468 if (!kfd_restore_wq)
469 kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);
470
471 if (!kfd_process_wq || !kfd_restore_wq) {
472 kfd_process_destroy_wq();
473 return -ENOMEM;
474 }
475
476 return 0;
477}
478
479void kfd_process_destroy_wq(void)
480{
481 if (kfd_process_wq) {
482 destroy_workqueue(kfd_process_wq);
483 kfd_process_wq = NULL;
484 }
485 if (kfd_restore_wq) {
486 destroy_workqueue(kfd_restore_wq);
487 kfd_restore_wq = NULL;
488 }
489}
490
491static void kfd_process_free_gpuvm(struct kgd_mem *mem,
492 struct kfd_process_device *pdd)
493{
494 struct kfd_dev *dev = pdd->dev;
495
496 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
497 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, NULL);
498}
499
500/* kfd_process_alloc_gpuvm - Allocate and map GPU memory for the KFD process
501 * This function should only be called right after the process
502 * is created and while kfd_processes_mutex is still held
503 * to avoid concurrency. Because of that exclusiveness, we do
504 * not need to take p->mutex.
505 */
506static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
507 uint64_t gpu_va, uint32_t size,
508 uint32_t flags, void **kptr)
509{
510 struct kfd_dev *kdev = pdd->dev;
511 struct kgd_mem *mem = NULL;
512 int handle;
513 int err;
514
515 err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
516 pdd->vm, &mem, NULL, flags);
517 if (err)
518 goto err_alloc_mem;
519
520 err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
521 if (err)
522 goto err_map_mem;
523
524 err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
525 if (err) {
526 pr_debug("Sync memory failed, wait interrupted by user signal\n");
527 goto sync_memory_failed;
528 }
529
530 /* Create an obj handle so kfd_process_device_remove_obj_handle
531 * will take care of the bo removal when the process finishes.
532 * We do not need to take p->mutex, because the process is just
533 * created and the ioctls have not had the chance to run.
534 */
535 handle = kfd_process_device_create_obj_handle(pdd, mem);
536
537 if (handle < 0) {
538 err = handle;
539 goto free_gpuvm;
540 }
541
542 if (kptr) {
543 err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
544 (struct kgd_mem *)mem, kptr, NULL);
545 if (err) {
546 pr_debug("Map GTT BO to kernel failed\n");
547 goto free_obj_handle;
548 }
549 }
550
551 return err;
552
553free_obj_handle:
554 kfd_process_device_remove_obj_handle(pdd, handle);
555free_gpuvm:
556sync_memory_failed:
557 kfd_process_free_gpuvm(mem, pdd);
558 return err;
559
560err_map_mem:
561 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, NULL);
562err_alloc_mem:
563 *kptr = NULL;
564 return err;
565}
566
567/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
568 * process for IB usage. The memory reserved is for KFD to submit
569 * IBs to AMDGPU from the kernel. If the memory is reserved
570 * successfully, ib_kaddr will have the CPU/kernel
571 * address. Check ib_kaddr before accessing the memory.
572 */
573static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
574{
575 struct qcm_process_device *qpd = &pdd->qpd;
576 uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
577 KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
578 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
579 KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
580 void *kaddr;
581 int ret;
582
583 if (qpd->ib_kaddr || !qpd->ib_base)
584 return 0;
585
586 /* ib_base is only set for dGPU */
587 ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
588 &kaddr);
589 if (ret)
590 return ret;
591
592 qpd->ib_kaddr = kaddr;
593
594 return 0;
595}
596
597struct kfd_process *kfd_create_process(struct file *filep)
598{
599 struct kfd_process *process;
600 struct task_struct *thread = current;
601 int ret;
602
603 if (!thread->mm)
604 return ERR_PTR(-EINVAL);
605
606 /* Only the pthreads threading model is supported. */
607 if (thread->group_leader->mm != thread->mm)
608 return ERR_PTR(-EINVAL);
609
610 /*
611	 * Take the kfd_processes_mutex before starting process creation
612 * so there won't be a case where two threads of the same process
613 * create two kfd_process structures
614 */
615 mutex_lock(&kfd_processes_mutex);
616
617 /* A prior open of /dev/kfd could have already created the process. */
618 process = find_process(thread);
619 if (process) {
620 pr_debug("Process already found\n");
621 } else {
622 process = create_process(thread);
623 if (IS_ERR(process))
624 goto out;
625
626 ret = kfd_process_init_cwsr_apu(process, filep);
627 if (ret) {
628 process = ERR_PTR(ret);
629 goto out;
630 }
631
632 if (!procfs.kobj)
633 goto out;
634
635 process->kobj = kfd_alloc_struct(process->kobj);
636 if (!process->kobj) {
637 pr_warn("Creating procfs kobject failed");
638 goto out;
639 }
640 ret = kobject_init_and_add(process->kobj, &procfs_type,
641 procfs.kobj, "%d",
642 (int)process->lead_thread->pid);
643 if (ret) {
644 pr_warn("Creating procfs pid directory failed");
645 kobject_put(process->kobj);
646 goto out;
647 }
648
649 process->attr_pasid.name = "pasid";
650 process->attr_pasid.mode = KFD_SYSFS_FILE_MODE;
651 sysfs_attr_init(&process->attr_pasid);
652 ret = sysfs_create_file(process->kobj, &process->attr_pasid);
653 if (ret)
654 pr_warn("Creating pasid for pid %d failed",
655 (int)process->lead_thread->pid);
656
657 process->kobj_queues = kobject_create_and_add("queues",
658 process->kobj);
659 if (!process->kobj_queues)
660 pr_warn("Creating KFD proc/queues folder failed");
661
662 ret = kfd_procfs_add_sysfs_files(process);
663 if (ret)
664 pr_warn("Creating sysfs usage file for pid %d failed",
665 (int)process->lead_thread->pid);
666 }
667out:
668 if (!IS_ERR(process))
669 kref_get(&process->ref);
670 mutex_unlock(&kfd_processes_mutex);
671
672 return process;
673}
674
675struct kfd_process *kfd_get_process(const struct task_struct *thread)
676{
677 struct kfd_process *process;
678
679 if (!thread->mm)
680 return ERR_PTR(-EINVAL);
681
682 /* Only the pthreads threading model is supported. */
683 if (thread->group_leader->mm != thread->mm)
684 return ERR_PTR(-EINVAL);
685
686 process = find_process(thread);
687 if (!process)
688 return ERR_PTR(-EINVAL);
689
690 return process;
691}
692
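/* Look up a kfd_process by its mm_struct. Callers hold the
 * kfd_processes_srcu read lock (or kfd_processes_mutex) around the
 * lookup and while the returned pointer is used.
 */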
693static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
694{
695 struct kfd_process *process;
696
697 hash_for_each_possible_rcu(kfd_processes_table, process,
698 kfd_processes, (uintptr_t)mm)
699 if (process->mm == mm)
700 return process;
701
702 return NULL;
703}
704
705static struct kfd_process *find_process(const struct task_struct *thread)
706{
707 struct kfd_process *p;
708 int idx;
709
710 idx = srcu_read_lock(&kfd_processes_srcu);
711 p = find_process_by_mm(thread->mm);
712 srcu_read_unlock(&kfd_processes_srcu, idx);
713
714 return p;
715}
716
717void kfd_unref_process(struct kfd_process *p)
718{
719 kref_put(&p->ref, kfd_process_ref_release);
720}
721
722static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
723{
724 struct kfd_process *p = pdd->process;
725 void *mem;
726 int id;
727
728 /*
729	 * Remove all handles from the idr and release the corresponding
730	 * local memory objects
731 */
732 idr_for_each_entry(&pdd->alloc_idr, mem, id) {
733 struct kfd_process_device *peer_pdd;
734
735 list_for_each_entry(peer_pdd, &p->per_device_data,
736 per_device_list) {
737 if (!peer_pdd->vm)
738 continue;
739 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
740 peer_pdd->dev->kgd, mem, peer_pdd->vm);
741 }
742
743 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL);
744 kfd_process_device_remove_obj_handle(pdd, id);
745 }
746}
747
748static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
749{
750 struct kfd_process_device *pdd;
751
752 list_for_each_entry(pdd, &p->per_device_data, per_device_list)
753 kfd_process_device_free_bos(pdd);
754}
755
756static void kfd_process_destroy_pdds(struct kfd_process *p)
757{
758 struct kfd_process_device *pdd, *temp;
759
760 list_for_each_entry_safe(pdd, temp, &p->per_device_data,
761 per_device_list) {
762 pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
763 pdd->dev->id, p->pasid);
764
765 if (pdd->drm_file) {
766 amdgpu_amdkfd_gpuvm_release_process_vm(
767 pdd->dev->kgd, pdd->vm);
768 fput(pdd->drm_file);
769 }
770 else if (pdd->vm)
771 amdgpu_amdkfd_gpuvm_destroy_process_vm(
772 pdd->dev->kgd, pdd->vm);
773
774 list_del(&pdd->per_device_list);
775
776 if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
777 free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
778 get_order(KFD_CWSR_TBA_TMA_SIZE));
779
780 kfree(pdd->qpd.doorbell_bitmap);
781 idr_destroy(&pdd->alloc_idr);
782
783 /*
784 * before destroying pdd, make sure to report availability
785 * for auto suspend
786 */
787 if (pdd->runtime_inuse) {
788 pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
789 pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
790 pdd->runtime_inuse = false;
791 }
792
793 kfree(pdd);
794 }
795}
796
797/* No process locking is needed in this function, because the process
798 * is not findable any more. We must assume that no other thread is
799 * using it any more, otherwise we couldn't safely free the process
800 * structure in the end.
801 */
802static void kfd_process_wq_release(struct work_struct *work)
803{
804 struct kfd_process *p = container_of(work, struct kfd_process,
805 release_work);
806 struct kfd_process_device *pdd;
807
808 /* Remove the procfs files */
809 if (p->kobj) {
810 sysfs_remove_file(p->kobj, &p->attr_pasid);
811 kobject_del(p->kobj_queues);
812 kobject_put(p->kobj_queues);
813 p->kobj_queues = NULL;
814
815 list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
816 sysfs_remove_file(p->kobj, &pdd->attr_vram);
817 sysfs_remove_file(p->kobj, &pdd->attr_sdma);
818 }
819
820 kobject_del(p->kobj);
821 kobject_put(p->kobj);
822 p->kobj = NULL;
823 }
824
825 kfd_iommu_unbind_process(p);
826
827 kfd_process_free_outstanding_kfd_bos(p);
828
829 kfd_process_destroy_pdds(p);
830 dma_fence_put(p->ef);
831
832 kfd_event_free_process(p);
833
834 kfd_pasid_free(p->pasid);
835 kfd_free_process_doorbells(p);
836
837 mutex_destroy(&p->mutex);
838
839 put_task_struct(p->lead_thread);
840
841 kfree(p);
842}
843
844static void kfd_process_ref_release(struct kref *ref)
845{
846 struct kfd_process *p = container_of(ref, struct kfd_process, ref);
847
848 INIT_WORK(&p->release_work, kfd_process_wq_release);
849 queue_work(kfd_process_wq, &p->release_work);
850}
851
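/* Drop the kfd_process reference once the MMU notifier is freed
 * (deferred by mmu_notifier_put()).
 */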
852static void kfd_process_free_notifier(struct mmu_notifier *mn)
853{
854 kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
855}
856
857static void kfd_process_notifier_release(struct mmu_notifier *mn,
858 struct mm_struct *mm)
859{
860 struct kfd_process *p;
861 struct kfd_process_device *pdd = NULL;
862
863 /*
864	 * The kfd_process structure cannot be freed because the
865 * mmu_notifier srcu is read locked
866 */
867 p = container_of(mn, struct kfd_process, mmu_notifier);
868 if (WARN_ON(p->mm != mm))
869 return;
870
871 mutex_lock(&kfd_processes_mutex);
872 hash_del_rcu(&p->kfd_processes);
873 mutex_unlock(&kfd_processes_mutex);
874 synchronize_srcu(&kfd_processes_srcu);
875
876 cancel_delayed_work_sync(&p->eviction_work);
877 cancel_delayed_work_sync(&p->restore_work);
878
879 mutex_lock(&p->mutex);
880
881	/* Iterate over all process device data structures; if a
882	 * pdd is in debug mode, force unregistration first so that
883	 * the queues can then be destroyed
884 */
885 list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
886 struct kfd_dev *dev = pdd->dev;
887
888 mutex_lock(kfd_get_dbgmgr_mutex());
889 if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
890 if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
891 kfd_dbgmgr_destroy(dev->dbgmgr);
892 dev->dbgmgr = NULL;
893 }
894 }
895 mutex_unlock(kfd_get_dbgmgr_mutex());
896 }
897
898 kfd_process_dequeue_from_all_devices(p);
899 pqm_uninit(&p->pqm);
900
901 /* Indicate to other users that MM is no longer valid */
902 p->mm = NULL;
903 /* Signal the eviction fence after user mode queues are
904 * destroyed. This allows any BOs to be freed without
905 * triggering pointless evictions or waiting for fences.
906 */
907 dma_fence_signal(p->ef);
908
909 mutex_unlock(&p->mutex);
910
911 mmu_notifier_put(&p->mmu_notifier);
912}
913
914static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
915 .release = kfd_process_notifier_release,
916 .free_notifier = kfd_process_free_notifier,
917};
918
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
        unsigned long offset;
        struct kfd_process_device *pdd;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                struct kfd_dev *dev = pdd->dev;
                struct qcm_process_device *qpd = &pdd->qpd;

                if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
                        continue;

                offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
                qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
                        KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
                        MAP_SHARED, offset);

                if (IS_ERR_VALUE(qpd->tba_addr)) {
                        int err = qpd->tba_addr;

                        pr_err("Failure to set tba address. error %d.\n", err);
                        qpd->tba_addr = 0;
                        qpd->cwsr_kaddr = NULL;
                        return err;
                }

                memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

                qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
                pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
                        qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
        }

        return 0;
}

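/* dGPU variant: the trap handler lives in a GTT BO allocated at the fixed
 * cwsr_base address inside the process GPUVM, mapped for CPU access so the
 * trap handler ISA can be copied into it.
 */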
static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
        struct kfd_dev *dev = pdd->dev;
        struct qcm_process_device *qpd = &pdd->qpd;
        uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
                | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
                | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
        void *kaddr;
        int ret;

        if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
                return 0;

        /* cwsr_base is only set for dGPU */
        ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
                                      KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
        if (ret)
                return ret;

        qpd->cwsr_kaddr = kaddr;
        qpd->tba_addr = qpd->cwsr_base;

        memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

        qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
        pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
                qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

        return 0;
}

/*
 * On return the kfd_process is fully operational and will be freed when the
 * mm is released.
 */
static struct kfd_process *create_process(const struct task_struct *thread)
{
        struct kfd_process *process;
        int err = -ENOMEM;

        process = kzalloc(sizeof(*process), GFP_KERNEL);
        if (!process)
                goto err_alloc_process;

        kref_init(&process->ref);
        mutex_init(&process->mutex);
        process->mm = thread->mm;
        process->lead_thread = thread->group_leader;
        INIT_LIST_HEAD(&process->per_device_data);
        INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
        INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
        process->last_restore_timestamp = get_jiffies_64();
        kfd_event_init_process(process);
        process->is_32bit_user_mode = in_compat_syscall();

        process->pasid = kfd_pasid_alloc();
        if (process->pasid == 0)
                goto err_alloc_pasid;

        if (kfd_alloc_process_doorbells(process) < 0)
                goto err_alloc_doorbells;

        err = pqm_init(&process->pqm, process);
        if (err != 0)
                goto err_process_pqm_init;

        /* init process apertures */
        err = kfd_init_apertures(process);
        if (err != 0)
                goto err_init_apertures;

        /* Must be last; from this point on, teardown has to go through the
         * mmu_notifier release path.
         */
        process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
        err = mmu_notifier_register(&process->mmu_notifier, process->mm);
        if (err)
                goto err_register_notifier;

        get_task_struct(process->lead_thread);
        hash_add_rcu(kfd_processes_table, &process->kfd_processes,
                     (uintptr_t)process->mm);

        return process;

err_register_notifier:
        kfd_process_free_outstanding_kfd_bos(process);
        kfd_process_destroy_pdds(process);
err_init_apertures:
        pqm_uninit(&process->pqm);
err_process_pqm_init:
        kfd_free_process_doorbells(process);
err_alloc_doorbells:
        kfd_pasid_free(process->pasid);
err_alloc_pasid:
        mutex_destroy(&process->mutex);
        kfree(process);
err_alloc_process:
        return ERR_PTR(err);
}

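/* Reserve the doorbells used for SDMA, IH and VCN on SOC15 ASICs so they are
 * never handed out to user mode queues. Both the primary range and its
 * mirrored counterpart are marked as used in the per-process bitmap.
 */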
static int init_doorbell_bitmap(struct qcm_process_device *qpd,
                                struct kfd_dev *dev)
{
        unsigned int i;
        int range_start = dev->shared_resources.non_cp_doorbells_start;
        int range_end = dev->shared_resources.non_cp_doorbells_end;

        if (!KFD_IS_SOC15(dev->device_info->asic_family))
                return 0;

        qpd->doorbell_bitmap =
                kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
                                     BITS_PER_BYTE), GFP_KERNEL);
        if (!qpd->doorbell_bitmap)
                return -ENOMEM;

        /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
        pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
        pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
                 range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
                 range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);

        for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
                if (i >= range_start && i <= range_end) {
                        set_bit(i, qpd->doorbell_bitmap);
                        set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
                                qpd->doorbell_bitmap);
                }
        }

        return 0;
}

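/* Return the per-device data of @p for @dev, or NULL if the process has not
 * been associated with the device yet.
 */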
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
                                                       struct kfd_process *p)
{
        struct kfd_process_device *pdd = NULL;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list)
                if (pdd->dev == dev)
                        return pdd;

        return NULL;
}

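/* Allocate and initialize a new kfd_process_device for @dev and link it into
 * the process's per-device list. Returns NULL on failure.
 */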
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
                                                          struct kfd_process *p)
{
        struct kfd_process_device *pdd = NULL;

        pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
        if (!pdd)
                return NULL;

        if (init_doorbell_bitmap(&pdd->qpd, dev)) {
                pr_err("Failed to init doorbell for process\n");
                kfree(pdd);
                return NULL;
        }

        pdd->dev = dev;
        INIT_LIST_HEAD(&pdd->qpd.queues_list);
        INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
        pdd->qpd.dqm = dev->dqm;
        pdd->qpd.pqm = &p->pqm;
        pdd->qpd.evicted = 0;
        pdd->qpd.mapped_gws_queue = false;
        pdd->process = p;
        pdd->bound = PDD_UNBOUND;
        pdd->already_dequeued = false;
        pdd->runtime_inuse = false;
        pdd->vram_usage = 0;
        pdd->sdma_past_activity_counter = 0;
        list_add(&pdd->per_device_list, &p->per_device_data);

        /* Init idr used for memory handle translation */
        idr_init(&pdd->alloc_idr);

        return pdd;
}

/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
                               struct file *drm_file)
{
        struct kfd_process *p;
        struct kfd_dev *dev;
        int ret;

        if (pdd->vm)
                return drm_file ? -EBUSY : 0;

        p = pdd->process;
        dev = pdd->dev;

        if (drm_file)
                ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
                        dev->kgd, drm_file, p->pasid,
                        &pdd->vm, &p->kgd_process_info, &p->ef);
        else
                ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
                        &pdd->vm, &p->kgd_process_info, &p->ef);
        if (ret) {
                pr_err("Failed to create process VM object\n");
                return ret;
        }

        amdgpu_vm_set_task_info(pdd->vm);

        ret = kfd_process_device_reserve_ib_mem(pdd);
        if (ret)
                goto err_reserve_ib_mem;
        ret = kfd_process_device_init_cwsr_dgpu(pdd);
        if (ret)
                goto err_init_cwsr;

        pdd->drm_file = drm_file;

        return 0;

err_init_cwsr:
err_reserve_ib_mem:
        kfd_process_device_free_bos(pdd);
        if (!drm_file)
                amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
        pdd->vm = NULL;

        return ret;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
                                                      struct kfd_process *p)
{
        struct kfd_process_device *pdd;
        int err;

        pdd = kfd_get_process_device_data(dev, p);
        if (!pdd) {
                pr_err("Process device data doesn't exist\n");
                return ERR_PTR(-ENOMEM);
        }

        /*
         * Signal the runtime-PM framework to auto-resume the device and
         * keep it resumed for as long as this pdd exists; the reference
         * is dropped when the pdd is destroyed.
         */
        if (!pdd->runtime_inuse) {
                err = pm_runtime_get_sync(dev->ddev->dev);
                if (err < 0) {
                        pm_runtime_put_autosuspend(dev->ddev->dev);
                        return ERR_PTR(err);
                }
        }

        err = kfd_iommu_bind_process_to_device(pdd);
        if (err)
                goto out;

        err = kfd_process_device_init_vm(pdd, NULL);
        if (err)
                goto out;

        /*
         * Make sure the runtime-PM usage counter is incremented only once
         * per pdd.
         */
        pdd->runtime_inuse = true;

        return pdd;

out:
        /* Balance the runtime-PM reference count and exit with error */
        if (!pdd->runtime_inuse) {
                pm_runtime_mark_last_busy(dev->ddev->dev);
                pm_runtime_put_autosuspend(dev->ddev->dev);
        }

        return ERR_PTR(err);
}

struct kfd_process_device *kfd_get_first_process_device_data(
                                                struct kfd_process *p)
{
        return list_first_entry(&p->per_device_data,
                                struct kfd_process_device,
                                per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
                                                struct kfd_process *p,
                                                struct kfd_process_device *pdd)
{
        if (list_is_last(&pdd->per_device_list, &p->per_device_data))
                return NULL;
        return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
        return !(list_empty(&p->per_device_data));
}

/* Create a handle that maps to @mem in the process-local memory IDR.
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
                                         void *mem)
{
        return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate a handle from the process-local memory IDR back to its memory
 * object. Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
                                          int handle)
{
        if (handle < 0)
                return NULL;

        return idr_find(&pdd->alloc_idr, handle);
}

/* Remove a handle from the process-local memory IDR.
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
                                          int handle)
{
        if (handle >= 0)
                idr_remove(&pdd->alloc_idr, handle);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
        struct kfd_process *p, *ret_p = NULL;
        unsigned int temp;

        int idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                if (p->pasid == pasid) {
                        kref_get(&p->ref);
                        ret_p = p;
                        break;
                }
        }

        srcu_read_unlock(&kfd_processes_srcu, idx);

        return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
        struct kfd_process *p;

        int idx = srcu_read_lock(&kfd_processes_srcu);

        p = find_process_by_mm(mm);
        if (p)
                kref_get(&p->ref);

        srcu_read_unlock(&kfd_processes_srcu, idx);

        return p;
}

/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
int kfd_process_evict_queues(struct kfd_process *p)
{
        struct kfd_process_device *pdd;
        int r = 0;
        unsigned int n_evicted = 0;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
                                                            &pdd->qpd);
                if (r) {
                        pr_err("Failed to evict process queues\n");
                        goto fail;
                }
                n_evicted++;
        }

        return r;

fail:
        /* To keep state consistent, roll back partial eviction by
         * restoring queues
         */
        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                if (n_evicted == 0)
                        break;
                if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
                                                              &pdd->qpd))
                        pr_err("Failed to restore queues\n");

                n_evicted--;
        }

        return r;
}

/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
        struct kfd_process_device *pdd;
        int r, ret = 0;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
                                                              &pdd->qpd);
                if (r) {
                        pr_err("Failed to restore process queues\n");
                        if (!ret)
                                ret = r;
                }
        }

        return ret;
}

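/* Delayed work that evicts all user queues of a process, typically scheduled
 * when the process's BOs are evicted under memory pressure. On success it
 * signals and releases the eviction fence and schedules the delayed restore
 * work.
 */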
static void evict_process_worker(struct work_struct *work)
{
        int ret;
        struct kfd_process *p;
        struct delayed_work *dwork;

        dwork = to_delayed_work(work);

        /* Process termination cancels this work item, so the kfd_process
         * is guaranteed to stay valid for the lifetime of this worker.
         */
        p = container_of(dwork, struct kfd_process, eviction_work);
        WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
                  "Eviction fence mismatch\n");

        /* A narrow window of overlap between the restore and evict work
         * items is possible: once amdgpu_amdkfd_gpuvm_restore_process_bos
         * unreserves the KFD BOs they can be evicted again, while restore
         * still has a few steps left to finish. Wait for any previous
         * restore work to complete first.
         */
        flush_delayed_work(&p->restore_work);

        pr_debug("Started evicting pasid 0x%x\n", p->pasid);
        ret = kfd_process_evict_queues(p);
        if (!ret) {
                dma_fence_signal(p->ef);
                dma_fence_put(p->ef);
                p->ef = NULL;
                queue_delayed_work(kfd_restore_wq, &p->restore_work,
                                   msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

                pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
        } else
                pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}

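/* Delayed work that revalidates the process's BOs and restores its user
 * queues after an eviction; reschedules itself if restoring the BOs fails.
 */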
static void restore_process_worker(struct work_struct *work)
{
        struct delayed_work *dwork;
        struct kfd_process *p;
        int ret = 0;

        dwork = to_delayed_work(work);

        /* Process termination cancels this work item, so the kfd_process
         * is guaranteed to stay valid for the lifetime of this worker.
         */
        p = container_of(dwork, struct kfd_process, restore_work);
        pr_debug("Started restoring pasid 0x%x\n", p->pasid);

        /* Set last_restore_timestamp before the restore has actually
         * succeeded. Otherwise it would have to be set by KGD
         * (restore_process_bos) before the KFD BOs are unreserved; if it
         * is not, the process can be evicted again before the timestamp
         * is updated. If the restore fails, the timestamp is simply set
         * again on the next attempt, which means the minimum GPU quantum
         * becomes PROCESS_ACTIVE_TIME_MS minus the time it takes to
         * execute the following two functions.
         */

        p->last_restore_timestamp = get_jiffies_64();
        ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
                                                      &p->ef);
        if (ret) {
                pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
                         p->pasid, PROCESS_BACK_OFF_TIME_MS);
                ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
                                msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
                WARN(!ret, "reschedule restore work failed\n");
                return;
        }

        ret = kfd_process_restore_queues(p);
        if (!ret)
                pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
        else
                pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}

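/* Evict the queues of every known process, e.g. before device suspend.
 * The eviction fences are signalled and released so that pending waits on
 * them do not block.
 */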
void kfd_suspend_all_processes(void)
{
        struct kfd_process *p;
        unsigned int temp;
        int idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                cancel_delayed_work_sync(&p->eviction_work);
                cancel_delayed_work_sync(&p->restore_work);

                if (kfd_process_evict_queues(p))
                        pr_err("Failed to suspend process 0x%x\n", p->pasid);
                dma_fence_signal(p->ef);
                dma_fence_put(p->ef);
                p->ef = NULL;
        }
        srcu_read_unlock(&kfd_processes_srcu, idx);
}

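/* Schedule restore work for every known process, e.g. after device resume.
 * Returns -EFAULT if any restore work item could not be queued.
 */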
int kfd_resume_all_processes(void)
{
        struct kfd_process *p;
        unsigned int temp;
        int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
                        pr_err("Restore process %d failed during resume\n",
                               p->pasid);
                        ret = -EFAULT;
                }
        }
        srcu_read_unlock(&kfd_processes_srcu, idx);
        return ret;
}

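/* Back the CWSR reserved-memory mapping of a process on an APU: allocate
 * zeroed kernel pages for the TBA/TMA area and remap them into the user VMA.
 */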
int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
                          struct vm_area_struct *vma)
{
        struct kfd_process_device *pdd;
        struct qcm_process_device *qpd;

        if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
                pr_err("Incorrect CWSR mapping size.\n");
                return -EINVAL;
        }

        pdd = kfd_get_process_device_data(dev, process);
        if (!pdd)
                return -EINVAL;
        qpd = &pdd->qpd;

        qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(KFD_CWSR_TBA_TMA_SIZE));
        if (!qpd->cwsr_kaddr) {
                pr_err("Error allocating per process CWSR buffer.\n");
                return -ENOMEM;
        }

        vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
                | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
        /* Map the allocated pages into the user process */
        return remap_pfn_range(vma, vma->vm_start,
                               PFN_DOWN(__pa(qpd->cwsr_kaddr)),
                               KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

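/* Flush the GPU TLB entries of this process on @pdd's device: by VMID when
 * the scheduler runs without HWS, otherwise by PASID.
 */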
void kfd_flush_tlb(struct kfd_process_device *pdd)
{
        struct kfd_dev *dev = pdd->dev;

        if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
                /* Nothing to flush until a VMID is assigned, which
                 * only happens when the first queue is created.
                 */
                if (pdd->qpd.vmid)
                        amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd,
                                                         pdd->qpd.vmid);
        } else {
                amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
                                                  pdd->process->pasid);
        }
}

#if defined(CONFIG_DEBUG_FS)

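/* debugfs helper: dump the MQDs of every known process */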
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
        struct kfd_process *p;
        unsigned int temp;
        int r = 0;

        int idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                seq_printf(m, "Process %d PASID 0x%x:\n",
                           p->lead_thread->tgid, p->pasid);

                mutex_lock(&p->mutex);
                r = pqm_debugfs_mqds(m, &p->pqm);
                mutex_unlock(&p->mutex);

                if (r)
                        break;
        }

        srcu_read_unlock(&kfd_processes_srcu, idx);

        return r;
}

#endif