/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
                                  unsigned int pasid, unsigned int vmid);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
                                enum kfd_unmap_queues_filter filter,
                                uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
                              enum kfd_unmap_queues_filter filter,
                              uint32_t filter_param);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
                                  struct queue *q);

static inline void deallocate_hqd(struct device_queue_manager *dqm,
                                  struct queue *q);
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
static int allocate_sdma_queue(struct device_queue_manager *dqm,
                               struct queue *q);
static void kfd_process_hw_exception(struct work_struct *work);

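/* Map a KFD queue type onto the MQD format the firmware expects: both
 * PCIe- and XGMI-optimized SDMA queues share the SDMA MQD layout, while
 * compute and DIQ queues use the CP layout.
 */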
static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
        if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
                return KFD_MQD_TYPE_SDMA;
        return KFD_MQD_TYPE_CP;
}

static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
        int i;
        int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec
                           + pipe) * dqm->dev->shared_resources.num_queue_per_pipe;

        /* queue is available for KFD usage if bit is 1 */
        for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
                if (test_bit(pipe_offset + i,
                             dqm->dev->shared_resources.cp_queue_bitmap))
                        return true;
        return false;
}

unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
{
        return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
                             KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
        return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
        return dqm->dev->shared_resources.num_pipe_per_mec;
}

static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
{
        return dqm->dev->device_info->num_sdma_engines;
}

static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
{
        return dqm->dev->device_info->num_xgmi_sdma_engines;
}

static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
{
        return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm);
}

unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
        return dqm->dev->device_info->num_sdma_engines
                * dqm->dev->device_info->num_sdma_queues_per_engine;
}

unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
{
        return dqm->dev->device_info->num_xgmi_sdma_engines
                * dqm->dev->device_info->num_sdma_queues_per_engine;
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
                             struct qcm_process_device *qpd)
{
        return dqm->dev->kfd2kgd->program_sh_mem_settings(
                                                dqm->dev->kgd, qpd->vmid,
                                                qpd->sh_mem_config,
                                                qpd->sh_mem_ape1_base,
                                                qpd->sh_mem_ape1_limit,
                                                qpd->sh_mem_bases);
}

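/* Scheduler bookkeeping: active_queue_count covers every active queue,
 * while active_cp_queue_count covers only CP queues (compute and DIQ),
 * since SDMA queues are dispatched on their own engines.
 */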
static void increment_queue_count(struct device_queue_manager *dqm,
                                  enum kfd_queue_type type)
{
        dqm->active_queue_count++;
        if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
                dqm->active_cp_queue_count++;
}

static void decrement_queue_count(struct device_queue_manager *dqm,
                                  enum kfd_queue_type type)
{
        dqm->active_queue_count--;
        if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
                dqm->active_cp_queue_count--;
}

int read_sdma_queue_counter(uint64_t q_rptr, uint64_t *val)
{
        int ret;
        uint64_t tmp = 0;

        if (!val)
                return -EINVAL;
        /*
         * The SDMA activity counter is stored at the queue's RPTR + 0x8
         * location.
         */
        if (!access_ok((const void __user *)(q_rptr +
                        sizeof(uint64_t)), sizeof(uint64_t))) {
                pr_err("Can't access sdma queue activity counter\n");
                return -EFAULT;
        }

        ret = get_user(tmp, (uint64_t *)(q_rptr + sizeof(uint64_t)));
        if (!ret)
                *val = tmp;

        return ret;
}

static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
        struct kfd_dev *dev = qpd->dqm->dev;

        if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
                /* On pre-SOC15 chips we need to use the queue ID to
                 * preserve the user mode ABI.
                 */
                q->doorbell_id = q->properties.queue_id;
        } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
                   q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
                /* For SDMA queues on SOC15 with 8-byte doorbell, use static
                 * doorbell assignments based on the engine and queue id.
                 * The doorbell index distance between RLC (2*i) and (2*i+1)
                 * for a SDMA engine is 512.
                 */
                uint32_t *idx_offset =
                        dev->shared_resources.sdma_doorbell_idx;

                q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
                        + (q->properties.sdma_queue_id & 1)
                        * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
                        + (q->properties.sdma_queue_id >> 1);
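                /* Illustrative example: with idx_offset[engine] = 0x40,
                 * sdma_queue_id = 3 yields 0x40 + 512 * (3 & 1) + (3 >> 1)
                 * = 0x40 + 512 + 1 = 577 (0x241).
                 */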
        } else {
                /* For CP queues on SOC15 reserve a free doorbell ID */
                unsigned int found;

                found = find_first_zero_bit(qpd->doorbell_bitmap,
                                            KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
                if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
                        pr_debug("No doorbells available");
                        return -EBUSY;
                }
                set_bit(found, qpd->doorbell_bitmap);
                q->doorbell_id = found;
        }

        q->properties.doorbell_off =
                kfd_get_doorbell_dw_offset_in_bar(dev, q->process,
                                                  q->doorbell_id);

        return 0;
}

static void deallocate_doorbell(struct qcm_process_device *qpd,
                                struct queue *q)
{
        unsigned int old;
        struct kfd_dev *dev = qpd->dqm->dev;

        if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
            q->properties.type == KFD_QUEUE_TYPE_SDMA ||
            q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
                return;

        old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
        WARN_ON(!old);
}

static int allocate_vmid(struct device_queue_manager *dqm,
                         struct qcm_process_device *qpd,
                         struct queue *q)
{
        int allocated_vmid = -1, i;

        for (i = dqm->dev->vm_info.first_vmid_kfd;
             i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
                if (!dqm->vmid_pasid[i]) {
                        allocated_vmid = i;
                        break;
                }
        }

        if (allocated_vmid < 0) {
                pr_err("no more vmid to allocate\n");
                return -ENOSPC;
        }

        pr_debug("vmid allocated: %d\n", allocated_vmid);

        dqm->vmid_pasid[allocated_vmid] = q->process->pasid;

        set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);

        qpd->vmid = allocated_vmid;
        q->properties.vmid = allocated_vmid;

        program_sh_mem_settings(dqm, qpd);

        /* qpd->page_table_base is set earlier when register_process()
         * is called, i.e. when the first queue is created.
         */
        dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
                        qpd->vmid,
                        qpd->page_table_base);
        /* invalidate the VM context after pasid and vmid mapping is set up */
        kfd_flush_tlb(qpd_to_pdd(qpd));

        if (dqm->dev->kfd2kgd->set_scratch_backing_va)
                dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
                                qpd->sh_hidden_private_base, qpd->vmid);

        return 0;
}

static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
                                       struct qcm_process_device *qpd)
{
        const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
        int ret;

        if (!qpd->ib_kaddr)
                return -ENOMEM;

        ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
        if (ret)
                return ret;

        return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
                                qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
                                pmf->release_mem_size / sizeof(uint32_t));
}

static void deallocate_vmid(struct device_queue_manager *dqm,
                            struct qcm_process_device *qpd,
                            struct queue *q)
{
        /* On GFX v7, CP doesn't flush TC at dequeue */
        if (q->device->device_info->asic_family == CHIP_HAWAII)
                if (flush_texture_cache_nocpsch(q->device, qpd))
                        pr_err("Failed to flush TC\n");

        kfd_flush_tlb(qpd_to_pdd(qpd));

        /* Release the vmid mapping */
        set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
        dqm->vmid_pasid[qpd->vmid] = 0;

        qpd->vmid = 0;
        q->properties.vmid = 0;
}

static int create_queue_nocpsch(struct device_queue_manager *dqm,
                                struct queue *q,
                                struct qcm_process_device *qpd)
{
        struct mqd_manager *mqd_mgr;
        int retval;

        dqm_lock(dqm);

        if (dqm->total_queue_count >= max_num_of_queues_per_device) {
                pr_warn("Can't create new usermode queue because %d queues were already created\n",
                                dqm->total_queue_count);
                retval = -EPERM;
                goto out_unlock;
        }

        if (list_empty(&qpd->queues_list)) {
                retval = allocate_vmid(dqm, qpd, q);
                if (retval)
                        goto out_unlock;
        }
        q->properties.vmid = qpd->vmid;
        /*
         * Eviction state logic: mark all queues as evicted, even ones
         * not currently active. Restoring inactive queues later only
         * updates the is_evicted flag but is a no-op otherwise.
         */
        q->properties.is_evicted = !!qpd->evicted;

        q->properties.tba_addr = qpd->tba_addr;
        q->properties.tma_addr = qpd->tma_addr;

        mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
                        q->properties.type)];
        if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
                retval = allocate_hqd(dqm, q);
                if (retval)
                        goto deallocate_vmid;
                pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
                        q->pipe, q->queue);
        } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
                q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
                retval = allocate_sdma_queue(dqm, q);
                if (retval)
                        goto deallocate_vmid;
                dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
        }

        retval = allocate_doorbell(qpd, q);
        if (retval)
                goto out_deallocate_hqd;

        /* Temporarily release dqm lock to avoid a circular lock dependency */
        dqm_unlock(dqm);
        q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
        dqm_lock(dqm);

        if (!q->mqd_mem_obj) {
                retval = -ENOMEM;
                goto out_deallocate_doorbell;
        }
        mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
                                &q->gart_mqd_addr, &q->properties);
        if (q->properties.is_active) {
                if (!dqm->sched_running) {
                        WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
                        goto add_queue_to_list;
                }

                if (WARN(q->process->mm != current->mm,
                                        "should only run in user thread"))
                        retval = -EFAULT;
                else
                        retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
                                        q->queue, &q->properties, current->mm);
                if (retval)
                        goto out_free_mqd;
        }

add_queue_to_list:
        list_add(&q->list, &qpd->queues_list);
        qpd->queue_count++;
        if (q->properties.is_active)
                increment_queue_count(dqm, q->properties.type);

        /*
         * Unconditionally increment this counter, regardless of the queue's
         * type or whether the queue is active.
         */
        dqm->total_queue_count++;
        pr_debug("Total of %d queues are accountable so far\n",
                        dqm->total_queue_count);
        goto out_unlock;

out_free_mqd:
        mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
        deallocate_doorbell(qpd, q);
out_deallocate_hqd:
        if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
                deallocate_hqd(dqm, q);
        else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
                q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
                deallocate_sdma_queue(dqm, q);
deallocate_vmid:
        if (list_empty(&qpd->queues_list))
                deallocate_vmid(dqm, qpd, q);
out_unlock:
        dqm_unlock(dqm);
        return retval;
}

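/* Pick a free HQD slot. Allocation starts at next_pipe_to_allocate and
 * advances round-robin across enabled pipes ("horizontal" allocation),
 * so consecutive queues are spread over different pipes.
 */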
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
        bool set;
        int pipe, bit, i;

        set = false;

        for (pipe = dqm->next_pipe_to_allocate, i = 0;
                        i < get_pipes_per_mec(dqm);
                        pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

                if (!is_pipe_enabled(dqm, 0, pipe))
                        continue;

                if (dqm->allocated_queues[pipe] != 0) {
                        bit = ffs(dqm->allocated_queues[pipe]) - 1;
                        dqm->allocated_queues[pipe] &= ~(1 << bit);
                        q->pipe = pipe;
                        q->queue = bit;
                        set = true;
                        break;
                }
        }

        if (!set)
                return -EBUSY;

        pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
        /* horizontal hqd allocation */
        dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

        return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
                                  struct queue *q)
{
        dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}

/* Access to DQM has to be locked before calling
 * destroy_queue_nocpsch_locked, to avoid unsynchronized access.
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
                                        struct qcm_process_device *qpd,
                                        struct queue *q)
{
        int retval;
        struct mqd_manager *mqd_mgr;

        mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
                        q->properties.type)];

        if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
                deallocate_hqd(dqm, q);
        else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
                 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
                deallocate_sdma_queue(dqm, q);
        else {
                pr_debug("q->properties.type %d is invalid\n",
                                q->properties.type);
                return -EINVAL;
        }
        dqm->total_queue_count--;

        deallocate_doorbell(qpd, q);

        if (!dqm->sched_running) {
                WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
                return 0;
        }

        retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
                                KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
                                KFD_UNMAP_LATENCY_MS,
                                q->pipe, q->queue);
        if (retval == -ETIME)
                qpd->reset_wavefronts = true;

        mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

        list_del(&q->list);
        if (list_empty(&qpd->queues_list)) {
                if (qpd->reset_wavefronts) {
                        pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
                                        dqm->dev);
                        /* dbgdev_wave_reset_wavefronts has to be called before
                         * deallocate_vmid(), i.e. when vmid is still in use.
                         */
                        dbgdev_wave_reset_wavefronts(dqm->dev,
                                        qpd->pqm->process);
                        qpd->reset_wavefronts = false;
                }

                deallocate_vmid(dqm, qpd, q);
        }
        qpd->queue_count--;
        if (q->properties.is_active) {
                decrement_queue_count(dqm, q->properties.type);
                if (q->properties.is_gws) {
                        dqm->gws_queue_count--;
                        qpd->mapped_gws_queue = false;
                }
        }

        return retval;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
                                 struct qcm_process_device *qpd,
                                 struct queue *q)
{
        int retval;
        uint64_t sdma_val = 0;
        struct kfd_process_device *pdd = qpd_to_pdd(qpd);

        /* Get the SDMA queue stats */
        if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
            (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
                retval = read_sdma_queue_counter((uint64_t)q->properties.read_ptr,
                                                 &sdma_val);
                if (retval)
                        pr_err("Failed to read SDMA queue counter for queue: %d\n",
                                q->properties.queue_id);
        }

        dqm_lock(dqm);
        retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
        if (!retval)
                pdd->sdma_past_activity_counter += sdma_val;
        dqm_unlock(dqm);

        return retval;
}

static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
        int retval = 0;
        struct mqd_manager *mqd_mgr;
        struct kfd_process_device *pdd;
        bool prev_active = false;

        dqm_lock(dqm);
        pdd = kfd_get_process_device_data(q->device, q->process);
        if (!pdd) {
                retval = -ENODEV;
                goto out_unlock;
        }
        mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
                        q->properties.type)];

        /* Save previous activity state for counters */
        prev_active = q->properties.is_active;

        /* Make sure the queue is unmapped before updating the MQD */
        if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
                retval = unmap_queues_cpsch(dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
                if (retval) {
                        pr_err("unmap queue failed\n");
                        goto out_unlock;
                }
        } else if (prev_active &&
                   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
                    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
                    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {

                if (!dqm->sched_running) {
                        WARN_ONCE(1, "Update non-HWS queue while stopped\n");
                        goto out_unlock;
                }

                retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
                                KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
                                KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
                if (retval) {
                        pr_err("destroy mqd failed\n");
                        goto out_unlock;
                }
        }

        mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);

        /*
         * check active state vs. the previous state and modify
         * counter accordingly. map_queues_cpsch uses the
         * dqm->active_queue_count to determine whether a new runlist must be
         * uploaded.
         */
        if (q->properties.is_active && !prev_active)
                increment_queue_count(dqm, q->properties.type);
        else if (!q->properties.is_active && prev_active)
                decrement_queue_count(dqm, q->properties.type);

        if (q->gws && !q->properties.is_gws) {
                if (q->properties.is_active) {
                        dqm->gws_queue_count++;
                        pdd->qpd.mapped_gws_queue = true;
                }
                q->properties.is_gws = true;
        } else if (!q->gws && q->properties.is_gws) {
                if (q->properties.is_active) {
                        dqm->gws_queue_count--;
                        pdd->qpd.mapped_gws_queue = false;
                }
                q->properties.is_gws = false;
        }

        if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
                retval = map_queues_cpsch(dqm);
        else if (q->properties.is_active &&
                 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
                  q->properties.type == KFD_QUEUE_TYPE_SDMA ||
                  q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
                if (WARN(q->process->mm != current->mm,
                         "should only run in user thread"))
                        retval = -EFAULT;
                else
                        retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
                                                   q->pipe, q->queue,
                                                   &q->properties, current->mm);
        }

out_unlock:
        dqm_unlock(dqm);
        return retval;
}

static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
                                        struct qcm_process_device *qpd)
{
        struct queue *q;
        struct mqd_manager *mqd_mgr;
        struct kfd_process_device *pdd;
        int retval, ret = 0;

        dqm_lock(dqm);
        if (qpd->evicted++ > 0) /* already evicted, do nothing */
                goto out;

        pdd = qpd_to_pdd(qpd);
        pr_info_ratelimited("Evicting PASID 0x%x queues\n",
                            pdd->process->pasid);

        /* Mark all queues as evicted. Deactivate all active queues on
         * the qpd.
         */
        list_for_each_entry(q, &qpd->queues_list, list) {
                q->properties.is_evicted = true;
                if (!q->properties.is_active)
                        continue;

                mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
                                q->properties.type)];
                q->properties.is_active = false;
                decrement_queue_count(dqm, q->properties.type);
                if (q->properties.is_gws) {
                        dqm->gws_queue_count--;
                        qpd->mapped_gws_queue = false;
                }

                if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
                        continue;

                retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
                                KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
                                KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
                if (retval && !ret)
                        /* Return the first error, but keep going to
                         * maintain a consistent eviction state
                         */
                        ret = retval;
        }

out:
        dqm_unlock(dqm);
        return ret;
}

static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
                                      struct qcm_process_device *qpd)
{
        struct queue *q;
        struct kfd_process_device *pdd;
        int retval = 0;

        dqm_lock(dqm);
        if (qpd->evicted++ > 0) /* already evicted, do nothing */
                goto out;

        pdd = qpd_to_pdd(qpd);
        pr_info_ratelimited("Evicting PASID 0x%x queues\n",
                            pdd->process->pasid);

        /* Mark all queues as evicted. Deactivate all active queues on
         * the qpd.
         */
        list_for_each_entry(q, &qpd->queues_list, list) {
                q->properties.is_evicted = true;
                if (!q->properties.is_active)
                        continue;

                q->properties.is_active = false;
                decrement_queue_count(dqm, q->properties.type);
        }
        retval = execute_queues_cpsch(dqm,
                                qpd->is_debug ?
                                KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);

out:
        dqm_unlock(dqm);
        return retval;
}

static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                                          struct qcm_process_device *qpd)
{
        struct mm_struct *mm = NULL;
        struct queue *q;
        struct mqd_manager *mqd_mgr;
        struct kfd_process_device *pdd;
        uint64_t pd_base;
        int retval, ret = 0;

        pdd = qpd_to_pdd(qpd);
        /* Retrieve PD base */
        pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

        dqm_lock(dqm);
        if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
                goto out;
        if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
                qpd->evicted--;
                goto out;
        }

        pr_info_ratelimited("Restoring PASID 0x%x queues\n",
                            pdd->process->pasid);

        /* Update PD Base in QPD */
        qpd->page_table_base = pd_base;
        pr_debug("Updated PD address to 0x%llx\n", pd_base);

        if (!list_empty(&qpd->queues_list)) {
                dqm->dev->kfd2kgd->set_vm_context_page_table_base(
                                dqm->dev->kgd,
                                qpd->vmid,
                                qpd->page_table_base);
                kfd_flush_tlb(pdd);
        }

        /* Take a safe reference to the mm_struct, which may otherwise
         * disappear even while the kfd_process is still referenced.
         */
        mm = get_task_mm(pdd->process->lead_thread);
        if (!mm) {
                ret = -EFAULT;
                goto out;
        }

        /* Remove the eviction flags. Activate queues that are not
         * inactive for other reasons.
         */
        list_for_each_entry(q, &qpd->queues_list, list) {
                q->properties.is_evicted = false;
                if (!QUEUE_IS_ACTIVE(q->properties))
                        continue;

                mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
                                q->properties.type)];
                q->properties.is_active = true;
                increment_queue_count(dqm, q->properties.type);
                if (q->properties.is_gws) {
                        dqm->gws_queue_count++;
                        qpd->mapped_gws_queue = true;
                }

                if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
                        continue;

                retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
                                           q->queue, &q->properties, mm);
                if (retval && !ret)
                        /* Return the first error, but keep going to
                         * maintain a consistent eviction state
                         */
                        ret = retval;
        }
        qpd->evicted = 0;
out:
        if (mm)
                mmput(mm);
        dqm_unlock(dqm);
        return ret;
}

static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
                                        struct qcm_process_device *qpd)
{
        struct queue *q;
        struct kfd_process_device *pdd;
        uint64_t pd_base;
        int retval = 0;

        pdd = qpd_to_pdd(qpd);
        /* Retrieve PD base */
        pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

        dqm_lock(dqm);
        if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
                goto out;
        if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
                qpd->evicted--;
                goto out;
        }

        pr_info_ratelimited("Restoring PASID 0x%x queues\n",
                            pdd->process->pasid);

        /* Update PD Base in QPD */
        qpd->page_table_base = pd_base;
        pr_debug("Updated PD address to 0x%llx\n", pd_base);

        /* activate all active queues on the qpd */
        list_for_each_entry(q, &qpd->queues_list, list) {
                q->properties.is_evicted = false;
                if (!QUEUE_IS_ACTIVE(q->properties))
                        continue;

                q->properties.is_active = true;
                increment_queue_count(dqm, q->properties.type);
        }
        retval = execute_queues_cpsch(dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
        qpd->evicted = 0;
out:
        dqm_unlock(dqm);
        return retval;
}

static int register_process(struct device_queue_manager *dqm,
                            struct qcm_process_device *qpd)
{
        struct device_process_node *n;
        struct kfd_process_device *pdd;
        uint64_t pd_base;
        int retval;

        n = kzalloc(sizeof(*n), GFP_KERNEL);
        if (!n)
                return -ENOMEM;

        n->qpd = qpd;

        pdd = qpd_to_pdd(qpd);
        /* Retrieve PD base */
        pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

        dqm_lock(dqm);
        list_add(&n->list, &dqm->queues);

        /* Update PD Base in QPD */
        qpd->page_table_base = pd_base;
        pr_debug("Updated PD address to 0x%llx\n", pd_base);

        retval = dqm->asic_ops.update_qpd(dqm, qpd);

        dqm->processes_count++;

        dqm_unlock(dqm);

        /* Outside the DQM lock because under the DQM lock we can't do
         * reclaim or take other locks that others hold while reclaiming.
         */
        kfd_inc_compute_active(dqm->dev);

        return retval;
}

static int unregister_process(struct device_queue_manager *dqm,
                              struct qcm_process_device *qpd)
{
        int retval;
        struct device_process_node *cur, *next;

        pr_debug("qpd->queues_list is %s\n",
                        list_empty(&qpd->queues_list) ? "empty" : "not empty");

        retval = 0;
        dqm_lock(dqm);

        list_for_each_entry_safe(cur, next, &dqm->queues, list) {
                if (qpd == cur->qpd) {
                        list_del(&cur->list);
                        kfree(cur);
                        dqm->processes_count--;
                        goto out;
                }
        }
        /* qpd not found in dqm list */
        retval = 1;
out:
        dqm_unlock(dqm);

        /* Outside the DQM lock because under the DQM lock we can't do
         * reclaim or take other locks that others hold while reclaiming.
         */
        if (!retval)
                kfd_dec_compute_active(dqm->dev);

        return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
                       unsigned int vmid)
{
        return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
                                                dqm->dev->kgd, pasid, vmid);
}

static void init_interrupts(struct device_queue_manager *dqm)
{
        unsigned int i;

        for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
                if (is_pipe_enabled(dqm, 0, i))
                        dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}

static int initialize_nocpsch(struct device_queue_manager *dqm)
{
        int pipe, queue;

        pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

        dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
                                        sizeof(unsigned int), GFP_KERNEL);
        if (!dqm->allocated_queues)
                return -ENOMEM;

        mutex_init(&dqm->lock_hidden);
        INIT_LIST_HEAD(&dqm->queues);
        dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
        dqm->active_cp_queue_count = 0;
        dqm->gws_queue_count = 0;

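        /* Seed the per-pipe free-slot masks: a set bit in
         * allocated_queues[pipe] marks an HQD that is available to KFD.
         */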
        for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
                int pipe_offset = pipe * get_queues_per_pipe(dqm);

                for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
                        if (test_bit(pipe_offset + queue,
                                     dqm->dev->shared_resources.cp_queue_bitmap))
                                dqm->allocated_queues[pipe] |= 1 << queue;
        }

        memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));

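        /* One bit per allocatable SDMA queue. For example, 2 engines with
         * 2 queues each give 4 queues, so ~0ULL >> (64 - 4) = 0xf.
         */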
        dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
        dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));

        return 0;
}

static void uninitialize(struct device_queue_manager *dqm)
{
        int i;

        WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);

        kfree(dqm->allocated_queues);
        for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
                kfree(dqm->mqd_mgrs[i]);
        mutex_destroy(&dqm->lock_hidden);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
        pr_info("SW scheduler is used");
        init_interrupts(dqm);

        if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
                return pm_init(&dqm->packets, dqm);
        dqm->sched_running = true;

        return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
        if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
                pm_uninit(&dqm->packets, false);
        dqm->sched_running = false;

        return 0;
}

static void pre_reset(struct device_queue_manager *dqm)
{
        dqm_lock(dqm);
        dqm->is_resetting = true;
        dqm_unlock(dqm);
}

static int allocate_sdma_queue(struct device_queue_manager *dqm,
                               struct queue *q)
{
        int bit;

        if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
                if (dqm->sdma_bitmap == 0) {
                        pr_err("No more SDMA queue to allocate\n");
                        return -ENOMEM;
                }

                bit = __ffs64(dqm->sdma_bitmap);
                dqm->sdma_bitmap &= ~(1ULL << bit);
                q->sdma_id = bit;
                q->properties.sdma_engine_id = q->sdma_id %
                                get_num_sdma_engines(dqm);
                q->properties.sdma_queue_id = q->sdma_id /
                                get_num_sdma_engines(dqm);
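                /* Queues are striped across engines: with 2 engines, for
                 * example, sdma_ids 0,1,2,3 map to (engine 0, queue 0),
                 * (engine 1, queue 0), (engine 0, queue 1) and
                 * (engine 1, queue 1).
                 */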
        } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
                if (dqm->xgmi_sdma_bitmap == 0) {
                        pr_err("No more XGMI SDMA queue to allocate\n");
                        return -ENOMEM;
                }
                bit = __ffs64(dqm->xgmi_sdma_bitmap);
                dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
                q->sdma_id = bit;
                /* sdma_engine_id is sdma id including
                 * both PCIe-optimized SDMAs and XGMI-
                 * optimized SDMAs. The calculation below
                 * assumes the first N engines are always
                 * PCIe-optimized ones
                 */
                q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
                                q->sdma_id % get_num_xgmi_sdma_engines(dqm);
                q->properties.sdma_queue_id = q->sdma_id /
                                get_num_xgmi_sdma_engines(dqm);
        }

        pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
        pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);

        return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
                                  struct queue *q)
{
        if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
                if (q->sdma_id >= get_num_sdma_queues(dqm))
                        return;
                dqm->sdma_bitmap |= (1ULL << q->sdma_id);
        } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
                if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
                        return;
                dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
        }
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

static int set_sched_resources(struct device_queue_manager *dqm)
{
        int i, mec;
        struct scheduling_resources res;

        res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

        res.queue_mask = 0;
        for (i = 0; i < KGD_MAX_QUEUES; ++i) {
                mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
                        / dqm->dev->shared_resources.num_pipe_per_mec;

                if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
                        continue;

                /* only acquire queues from the first MEC */
                if (mec > 0)
                        continue;

                /* This situation may be hit in the future if a new HW
                 * generation exposes more than 64 queues. If so, the
                 * definition of res.queue_mask needs updating
                 */
                if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
                        pr_err("Invalid queue enabled by amdgpu: %d\n", i);
                        break;
                }

                res.queue_mask |= 1ull
                        << amdgpu_queue_mask_bit_to_set_resource_bit(
                                (struct amdgpu_device *)dqm->dev->kgd, i);
        }
        res.gws_mask = ~0ull;
        res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;

        pr_debug("Scheduling resources:\n"
                "vmid mask: 0x%8X\n"
                "queue mask: 0x%8llX\n",
                res.vmid_mask, res.queue_mask);

        return pm_send_set_resources(&dqm->packets, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
        pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

        mutex_init(&dqm->lock_hidden);
        INIT_LIST_HEAD(&dqm->queues);
        dqm->active_queue_count = dqm->processes_count = 0;
        dqm->active_cp_queue_count = 0;
        dqm->gws_queue_count = 0;
        dqm->active_runlist = false;
        dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
        dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));

        INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);

        return 0;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
        int retval;

        retval = pm_init(&dqm->packets, dqm);
        if (retval)
                goto fail_packet_manager_init;

        retval = set_sched_resources(dqm);
        if (retval)
                goto fail_set_sched_resources;

        pr_debug("Allocating fence memory\n");

        /* allocate fence memory on the gart */
        retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
                                     &dqm->fence_mem);

        if (retval)
                goto fail_allocate_vidmem;

        dqm->fence_addr = dqm->fence_mem->cpu_ptr;
        dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

        init_interrupts(dqm);

        dqm_lock(dqm);
        /* clear hang status when the driver tries to start the hw scheduler */
        dqm->is_hws_hang = false;
        dqm->is_resetting = false;
        dqm->sched_running = true;
        execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
        dqm_unlock(dqm);

        return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
        pm_uninit(&dqm->packets, false);
fail_packet_manager_init:
        return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
        bool hanging;

        dqm_lock(dqm);
        if (!dqm->is_hws_hang)
                unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
        hanging = dqm->is_hws_hang || dqm->is_resetting;
        dqm->sched_running = false;
        dqm_unlock(dqm);

        pm_release_ib(&dqm->packets);

        kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
        pm_uninit(&dqm->packets, hanging);

        return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
                                     struct kernel_queue *kq,
                                     struct qcm_process_device *qpd)
{
        dqm_lock(dqm);
        if (dqm->total_queue_count >= max_num_of_queues_per_device) {
                pr_warn("Can't create new kernel queue because %d queues were already created\n",
                                dqm->total_queue_count);
                dqm_unlock(dqm);
                return -EPERM;
        }

        /*
         * Unconditionally increment this counter, regardless of the queue's
         * type or whether the queue is active.
         */
        dqm->total_queue_count++;
        pr_debug("Total of %d queues are accountable so far\n",
                        dqm->total_queue_count);

        list_add(&kq->list, &qpd->priv_queue_list);
        increment_queue_count(dqm, kq->queue->properties.type);
        qpd->is_debug = true;
        execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
        dqm_unlock(dqm);

        return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
                                       struct kernel_queue *kq,
                                       struct qcm_process_device *qpd)
{
        dqm_lock(dqm);
        list_del(&kq->list);
        decrement_queue_count(dqm, kq->queue->properties.type);
        qpd->is_debug = false;
        execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
        /*
         * Unconditionally decrement this counter, regardless of the queue's
         * type.
         */
        dqm->total_queue_count--;
        pr_debug("Total of %d queues are accountable so far\n",
                        dqm->total_queue_count);
        dqm_unlock(dqm);
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
                              struct qcm_process_device *qpd)
{
        int retval;
        struct mqd_manager *mqd_mgr;

        if (dqm->total_queue_count >= max_num_of_queues_per_device) {
                pr_warn("Can't create new usermode queue because %d queues were already created\n",
                                dqm->total_queue_count);
                retval = -EPERM;
                goto out;
        }

        if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
            q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
                dqm_lock(dqm);
                retval = allocate_sdma_queue(dqm, q);
                dqm_unlock(dqm);
                if (retval)
                        goto out;
        }

        retval = allocate_doorbell(qpd, q);
        if (retval)
                goto out_deallocate_sdma_queue;

        mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
                        q->properties.type)];

        if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
            q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
                dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
        q->properties.tba_addr = qpd->tba_addr;
        q->properties.tma_addr = qpd->tma_addr;
        q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
        if (!q->mqd_mem_obj) {
                retval = -ENOMEM;
                goto out_deallocate_doorbell;
        }

        dqm_lock(dqm);
        /*
         * Eviction state logic: mark all queues as evicted, even ones
         * not currently active. Restoring inactive queues later only
         * updates the is_evicted flag but is a no-op otherwise.
         */
        q->properties.is_evicted = !!qpd->evicted;
        mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
                                &q->gart_mqd_addr, &q->properties);

        list_add(&q->list, &qpd->queues_list);
        qpd->queue_count++;

        if (q->properties.is_active) {
                increment_queue_count(dqm, q->properties.type);

                execute_queues_cpsch(dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
        }

        /*
         * Unconditionally increment this counter, regardless of the queue's
         * type or whether the queue is active.
         */
        dqm->total_queue_count++;

        pr_debug("Total of %d queues are accountable so far\n",
                        dqm->total_queue_count);

        dqm_unlock(dqm);
        return retval;

out_deallocate_doorbell:
        deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
        if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
            q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
                dqm_lock(dqm);
                deallocate_sdma_queue(dqm, q);
                dqm_unlock(dqm);
        }
out:
        return retval;
}

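/* Poll a fence value written back by the HWS firmware.
 * unmap_queues_cpsch() seeds *fence_addr with KFD_FENCE_INIT, asks the
 * scheduler to write KFD_FENCE_COMPLETED back through a QUERY_STATUS
 * packet, and then waits here until the value flips or timeout_ms expires.
 */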
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
                              unsigned int fence_value,
                              unsigned int timeout_ms)
{
        unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

        while (*fence_addr != fence_value) {
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("qcm fence wait loop timeout expired\n");
                        /* In HWS case, this is used to halt the driver thread
                         * in order not to mess up CP states before doing
                         * scandumps for FW debugging.
                         */
                        while (halt_if_hws_hang)
                                schedule();

                        return -ETIME;
                }
                schedule();
        }

        return 0;
}

/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
        int retval;

        if (!dqm->sched_running)
                return 0;
        if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
                return 0;
        if (dqm->active_runlist)
                return 0;

        retval = pm_send_runlist(&dqm->packets, &dqm->queues);
        pr_debug("%s sent runlist\n", __func__);
        if (retval) {
                pr_err("failed to execute runlist\n");
                return retval;
        }
        dqm->active_runlist = true;

        return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
                              enum kfd_unmap_queues_filter filter,
                              uint32_t filter_param)
{
        int retval = 0;

        if (!dqm->sched_running)
                return 0;
        if (dqm->is_hws_hang)
                return -EIO;
        if (!dqm->active_runlist)
                return retval;

        retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
                        filter, filter_param, false, 0);
        if (retval)
                return retval;

        *dqm->fence_addr = KFD_FENCE_INIT;
        pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
                                KFD_FENCE_COMPLETED);
        /* Wait for the fence; timing out here means the preemption failed */
        retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
                                queue_preemption_timeout_ms);
        if (retval) {
                pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
                dqm->is_hws_hang = true;
                /* It's possible we're detecting a HWS hang in the
                 * middle of a GPU reset. No need to schedule another
                 * reset in this case.
                 */
                if (!dqm->is_resetting)
                        schedule_work(&dqm->hw_exception_work);
                return retval;
        }

        pm_release_ib(&dqm->packets);
        dqm->active_runlist = false;

        return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
                                enum kfd_unmap_queues_filter filter,
                                uint32_t filter_param)
{
        int retval;

        if (dqm->is_hws_hang)
                return -EIO;
        retval = unmap_queues_cpsch(dqm, filter, filter_param);
        if (retval)
                return retval;

        return map_queues_cpsch(dqm);
}

static int destroy_queue_cpsch(struct device_queue_manager *dqm,
                               struct qcm_process_device *qpd,
                               struct queue *q)
{
        int retval;
        struct mqd_manager *mqd_mgr;
        uint64_t sdma_val = 0;
        struct kfd_process_device *pdd = qpd_to_pdd(qpd);

        /* Get the SDMA queue stats */
        if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
            (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
                retval = read_sdma_queue_counter((uint64_t)q->properties.read_ptr,
                                                 &sdma_val);
                if (retval)
                        pr_err("Failed to read SDMA queue counter for queue: %d\n",
                                q->properties.queue_id);
        }

        retval = 0;

        /* remove queue from list to prevent rescheduling after preemption */
        dqm_lock(dqm);

        if (qpd->is_debug) {
                /*
                 * error, currently we do not allow to destroy a queue
                 * of a currently debugged process
                 */
                retval = -EBUSY;
                goto failed_try_destroy_debugged_queue;

        }

        mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
                        q->properties.type)];

        deallocate_doorbell(qpd, q);

        if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
            (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
                deallocate_sdma_queue(dqm, q);
                pdd->sdma_past_activity_counter += sdma_val;
        }

        list_del(&q->list);
        qpd->queue_count--;
        if (q->properties.is_active) {
                decrement_queue_count(dqm, q->properties.type);
                retval = execute_queues_cpsch(dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
                if (retval == -ETIME)
                        qpd->reset_wavefronts = true;
                if (q->properties.is_gws) {
                        dqm->gws_queue_count--;
                        qpd->mapped_gws_queue = false;
                }
        }

        /*
         * Unconditionally decrement this counter, regardless of the queue's
         * type
         */
        dqm->total_queue_count--;
        pr_debug("Total of %d queues are accountable so far\n",
                        dqm->total_queue_count);

        dqm_unlock(dqm);

        /* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
        mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

        return retval;

failed_try_destroy_debugged_queue:

        dqm_unlock(dqm);
        return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF
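/* For example, base = 0x10000 and size = 0x20000 give limit = 0x2ffff;
 * both satisfy the masks above and are programmed (shifted right by 16)
 * as SH_MEM_APE1_BASE = 0x1 and SH_MEM_APE1_LIMIT = 0x2.
 */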

static bool set_cache_memory_policy(struct device_queue_manager *dqm,
                                    struct qcm_process_device *qpd,
                                    enum cache_policy default_policy,
                                    enum cache_policy alternate_policy,
                                    void __user *alternate_aperture_base,
                                    uint64_t alternate_aperture_size)
{
        bool retval = true;

        if (!dqm->asic_ops.set_cache_memory_policy)
                return retval;

        dqm_lock(dqm);

        if (alternate_aperture_size == 0) {
                /* base > limit disables APE1 */
                qpd->sh_mem_ape1_base = 1;
                qpd->sh_mem_ape1_limit = 0;
        } else {
                /*
                 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
                 *                      SH_MEM_APE1_BASE[31:0], 0x0000 }
                 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
                 *                      SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
                 * Verify that the base and size parameters can be
                 * represented in this format and convert them.
                 * Additionally restrict APE1 to user-mode addresses.
                 */

                uint64_t base = (uintptr_t)alternate_aperture_base;
                uint64_t limit = base + alternate_aperture_size - 1;

                if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
                   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
                        retval = false;
                        goto out;
                }

                qpd->sh_mem_ape1_base = base >> 16;
                qpd->sh_mem_ape1_limit = limit >> 16;
        }

        retval = dqm->asic_ops.set_cache_memory_policy(
                        dqm,
                        qpd,
                        default_policy,
                        alternate_policy,
                        alternate_aperture_base,
                        alternate_aperture_size);

        if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
                program_sh_mem_settings(dqm, qpd);

        pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
                qpd->sh_mem_config, qpd->sh_mem_ape1_base,
                qpd->sh_mem_ape1_limit);

out:
        dqm_unlock(dqm);
        return retval;
}

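/* Program the trap handler addresses. With CWSR enabled, the user TBA/TMA
 * pair is written into the first two 64-bit words of the CWSR TMA page so
 * that the CWSR trap handler can chain into the user-mode trap handler.
 */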
static int set_trap_handler(struct device_queue_manager *dqm,
                            struct qcm_process_device *qpd,
                            uint64_t tba_addr,
                            uint64_t tma_addr)
{
        uint64_t *tma;

        if (dqm->dev->cwsr_enabled) {
                /* Jump from CWSR trap handler to user trap */
                tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
                tma[0] = tba_addr;
                tma[1] = tma_addr;
        } else {
                qpd->tba_addr = tba_addr;
                qpd->tma_addr = tma_addr;
        }

        return 0;
}

static int process_termination_nocpsch(struct device_queue_manager *dqm,
                                       struct qcm_process_device *qpd)
{
        struct queue *q, *next;
        struct device_process_node *cur, *next_dpn;
        int retval = 0;
        bool found = false;

        dqm_lock(dqm);

        /* Clear all user mode queues */
        list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
                int ret;

                ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
                if (ret)
                        retval = ret;
        }

        /* Unregister process */
        list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
                if (qpd == cur->qpd) {
                        list_del(&cur->list);
                        kfree(cur);
                        dqm->processes_count--;
                        found = true;
                        break;
                }
        }

        dqm_unlock(dqm);

        /* Outside the DQM lock because under the DQM lock we can't do
         * reclaim or take other locks that others hold while reclaiming.
         */
        if (found)
                kfd_dec_compute_active(dqm->dev);

        return retval;
}

static int get_wave_state(struct device_queue_manager *dqm,
                          struct queue *q,
                          void __user *ctl_stack,
                          u32 *ctl_stack_used_size,
                          u32 *save_area_used_size)
{
        struct mqd_manager *mqd_mgr;
        int r;

        dqm_lock(dqm);

        if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
            q->properties.is_active || !q->device->cwsr_enabled) {
                r = -EINVAL;
                goto dqm_unlock;
        }

        mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];

        if (!mqd_mgr->get_wave_state) {
                r = -EINVAL;
                goto dqm_unlock;
        }

        r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
                        ctl_stack_used_size, save_area_used_size);

dqm_unlock:
        dqm_unlock(dqm);
        return r;
}

static int process_termination_cpsch(struct device_queue_manager *dqm,
                                     struct qcm_process_device *qpd)
{
        int retval;
        struct queue *q, *next;
        struct kernel_queue *kq, *kq_next;
        struct mqd_manager *mqd_mgr;
        struct device_process_node *cur, *next_dpn;
        enum kfd_unmap_queues_filter filter =
                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
        bool found = false;

        retval = 0;

        dqm_lock(dqm);

        /* Clean all kernel queues */
        list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
                list_del(&kq->list);
                decrement_queue_count(dqm, kq->queue->properties.type);
                qpd->is_debug = false;
                dqm->total_queue_count--;
                filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
        }

        /* Clear all user mode queues */
        list_for_each_entry(q, &qpd->queues_list, list) {
                if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
                    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
                        deallocate_sdma_queue(dqm, q);

                if (q->properties.is_active) {
                        decrement_queue_count(dqm, q->properties.type);
                        if (q->properties.is_gws) {
                                dqm->gws_queue_count--;
                                qpd->mapped_gws_queue = false;
                        }
                }

                dqm->total_queue_count--;
        }

        /* Unregister process */
        list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
                if (qpd == cur->qpd) {
                        list_del(&cur->list);
                        kfree(cur);
                        dqm->processes_count--;
                        found = true;
                        break;
                }
        }

        retval = execute_queues_cpsch(dqm, filter, 0);
        if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
                pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
                dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
                qpd->reset_wavefronts = false;
        }

        dqm_unlock(dqm);

        /* Outside the DQM lock because under the DQM lock we can't do
         * reclaim or take other locks that others hold while reclaiming.
         */
        if (found)
                kfd_dec_compute_active(dqm->dev);

        /* Lastly, free mqd resources.
         * Do free_mqd() after dqm_unlock to avoid circular locking.
         */
        list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
                mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
                                q->properties.type)];
                list_del(&q->list);
                qpd->queue_count--;
                mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
        }

        return retval;
}

static int init_mqd_managers(struct device_queue_manager *dqm)
{
        int i, j;
        struct mqd_manager *mqd_mgr;

        for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
                mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
                if (!mqd_mgr) {
                        pr_err("mqd manager [%d] initialization failed\n", i);
                        goto out_free;
                }
                dqm->mqd_mgrs[i] = mqd_mgr;
        }

        return 0;

out_free:
        for (j = 0; j < i; j++) {
                kfree(dqm->mqd_mgrs[j]);
                dqm->mqd_mgrs[j] = NULL;
        }

        return -ENOMEM;
}

/* Allocate one hiq mqd (HWS) and all SDMA mqd in a contiguous chunk */
static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
{
        int retval;
        struct kfd_dev *dev = dqm->dev;
        struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
        uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
                get_num_all_sdma_engines(dqm) *
                dev->device_info->num_sdma_queues_per_engine +
                dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;

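        /* One MQD slot for every SDMA queue on every engine (PCIe and
         * XGMI), plus a single HIQ MQD at the end of the chunk.
         */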
        retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
                &(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
                (void *)&(mem_obj->cpu_ptr), false);

        return retval;
}

struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
        struct device_queue_manager *dqm;

        pr_debug("Loading device queue manager\n");

        dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
        if (!dqm)
                return NULL;

        switch (dev->device_info->asic_family) {
        /* HWS is not available on Hawaii. */
        case CHIP_HAWAII:
        /* HWS depends on CWSR for timely dequeue. CWSR is not
         * available on Tonga.
         *
         * FIXME: This argument also applies to Kaveri.
         */
        case CHIP_TONGA:
                dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
                break;
        default:
                dqm->sched_policy = sched_policy;
                break;
        }

        dqm->dev = dev;
        switch (dqm->sched_policy) {
        case KFD_SCHED_POLICY_HWS:
        case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
                /* initialize dqm for cp scheduling */
                dqm->ops.create_queue = create_queue_cpsch;
                dqm->ops.initialize = initialize_cpsch;
                dqm->ops.start = start_cpsch;
                dqm->ops.stop = stop_cpsch;
                dqm->ops.pre_reset = pre_reset;
                dqm->ops.destroy_queue = destroy_queue_cpsch;
                dqm->ops.update_queue = update_queue;
                dqm->ops.register_process = register_process;
                dqm->ops.unregister_process = unregister_process;
                dqm->ops.uninitialize = uninitialize;
                dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
                dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
                dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
                dqm->ops.set_trap_handler = set_trap_handler;
                dqm->ops.process_termination = process_termination_cpsch;
                dqm->ops.evict_process_queues = evict_process_queues_cpsch;
                dqm->ops.restore_process_queues = restore_process_queues_cpsch;
                dqm->ops.get_wave_state = get_wave_state;
                break;
        case KFD_SCHED_POLICY_NO_HWS:
                /* initialize dqm for no cp scheduling */
                dqm->ops.start = start_nocpsch;
                dqm->ops.stop = stop_nocpsch;
                dqm->ops.pre_reset = pre_reset;
                dqm->ops.create_queue = create_queue_nocpsch;
                dqm->ops.destroy_queue = destroy_queue_nocpsch;
                dqm->ops.update_queue = update_queue;
                dqm->ops.register_process = register_process;
                dqm->ops.unregister_process = unregister_process;
                dqm->ops.initialize = initialize_nocpsch;
                dqm->ops.uninitialize = uninitialize;
                dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
                dqm->ops.set_trap_handler = set_trap_handler;
                dqm->ops.process_termination = process_termination_nocpsch;
                dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
                dqm->ops.restore_process_queues =
                        restore_process_queues_nocpsch;
                dqm->ops.get_wave_state = get_wave_state;
                break;
        default:
                pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
                goto out_free;
        }

        switch (dev->device_info->asic_family) {
        case CHIP_CARRIZO:
                device_queue_manager_init_vi(&dqm->asic_ops);
                break;

        case CHIP_KAVERI:
                device_queue_manager_init_cik(&dqm->asic_ops);
                break;

        case CHIP_HAWAII:
                device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
                break;

        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_POLARIS10:
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
                device_queue_manager_init_vi_tonga(&dqm->asic_ops);
                break;

        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
        case CHIP_ARCTURUS:
                device_queue_manager_init_v9(&dqm->asic_ops);
                break;
        case CHIP_NAVI10:
        case CHIP_NAVI12:
        case CHIP_NAVI14:
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
                device_queue_manager_init_v10_navi10(&dqm->asic_ops);
                break;
        default:
                WARN(1, "Unexpected ASIC family %u",
                     dev->device_info->asic_family);
                goto out_free;
        }

        if (init_mqd_managers(dqm))
                goto out_free;

1956 if (allocate_hiq_sdma_mqd(dqm)) {
		pr_err("Failed to allocate hiq sdma mqd chunk buffer\n");
		goto out_free;
	}

	if (!dqm->ops.initialize(dqm))
		return dqm;

out_free:
	kfree(dqm);
	return NULL;
}

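/*
 * Free the GTT buffer holding the HIQ MQD and all SDMA MQDs, allocated
 * as one contiguous chunk by allocate_hiq_sdma_mqd().
 */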
static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
				    struct kfd_mem_obj *mqd)
{
	WARN(!mqd, "No hiq sdma mqd chunk to free");

	amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem);
}

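/*
 * Tear down a device queue manager created by device_queue_manager_init():
 * run the policy-specific uninitialize hook, free the HIQ/SDMA MQD chunk
 * and finally the dqm structure itself.
 */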
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	dqm->ops.uninitialize(dqm);
	deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
	kfree(dqm);
}

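/*
 * Called in response to a GPU VM fault: look up the faulting process by
 * PASID and evict all of its queues on this device. Returns -EINVAL if
 * no process matches the PASID.
 */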
int kfd_process_vm_fault(struct device_queue_manager *dqm,
			 unsigned int pasid)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	int ret = 0;

	if (!p)
		return -EINVAL;
	pdd = kfd_get_process_device_data(dqm->dev, p);
	if (pdd)
		ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
	kfd_unref_process(p);

	return ret;
}

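/*
 * Deferred work scheduled from execute_queues_cpsch() when queue
 * preemption fails and the HW scheduler is presumed hung; recover by
 * resetting the GPU.
 */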
static void kfd_process_hw_exception(struct work_struct *work)
{
	struct device_queue_manager *dqm = container_of(work,
			struct device_queue_manager, hw_exception_work);
	amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
}

#if defined(CONFIG_DEBUG_FS)

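/*
 * Pretty-print a register dump. Each line shows one offset followed by up
 * to eight consecutive register values; a new line is started whenever the
 * register offsets stop being contiguous.
 */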
static void seq_reg_dump(struct seq_file *m,
			 uint32_t (*dump)[2], uint32_t n_regs)
{
	uint32_t i, count;

	for (i = 0, count = 0; i < n_regs; i++) {
		if (count == 0 ||
		    dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
			seq_printf(m, "%s %08x: %08x",
				   i ? "\n" : "",
				   dump[i][0], dump[i][1]);
			count = 7;
		} else {
			seq_printf(m, " %08x", dump[i][1]);
			count--;
		}
	}

	seq_puts(m, "\n");
}

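/*
 * debugfs dump of all hardware queue descriptors: the HIQ, every CP queue
 * reserved for KFD in cp_queue_bitmap, and every SDMA RLC queue. Bails out
 * early if the scheduler is not running.
 */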
int dqm_debugfs_hqds(struct seq_file *m, void *data)
{
	struct device_queue_manager *dqm = data;
	uint32_t (*dump)[2], n_regs;
	int pipe, queue;
	int r = 0;

	if (!dqm->sched_running) {
		seq_puts(m, " Device is stopped\n");
		return 0;
	}

	r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
					KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
					&dump, &n_regs);
	if (!r) {
		seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n",
			   KFD_CIK_HIQ_PIPE / get_pipes_per_mec(dqm) + 1,
			   KFD_CIK_HIQ_PIPE % get_pipes_per_mec(dqm),
			   KFD_CIK_HIQ_QUEUE);
		seq_reg_dump(m, dump, n_regs);

		kfree(dump);
	}

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
			if (!test_bit(pipe_offset + queue,
				      dqm->dev->shared_resources.cp_queue_bitmap))
				continue;

			r = dqm->dev->kfd2kgd->hqd_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, " CP Pipe %d, Queue %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
		for (queue = 0;
		     queue < dqm->dev->device_info->num_sdma_queues_per_engine;
		     queue++) {
			r = dqm->dev->kfd2kgd->hqd_sdma_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, " SDMA Engine %d, RLC %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	return r;
}

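/*
 * debugfs hook that forces the current runlist to be preempted and then
 * re-uploaded. active_runlist is set first so that unmap_queues_cpsch(),
 * reached via execute_queues_cpsch(), sends the unmap packet even if the
 * driver does not believe a runlist is active.
 */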
int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
{
	int r = 0;

	dqm_lock(dqm);
	dqm->active_runlist = true;
	r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	dqm_unlock(dqm);

	return r;
}

#endif
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/ratelimit.h>
25#include <linux/printk.h>
26#include <linux/slab.h>
27#include <linux/list.h>
28#include <linux/types.h>
29#include <linux/bitops.h>
30#include <linux/sched.h>
31#include "kfd_priv.h"
32#include "kfd_device_queue_manager.h"
33#include "kfd_mqd_manager.h"
34#include "cik_regs.h"
35#include "kfd_kernel_queue.h"
36#include "amdgpu_amdkfd.h"
37
38/* Size of the per-pipe EOP queue */
39#define CIK_HPD_EOP_BYTES_LOG2 11
40#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
41
42static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
43 unsigned int pasid, unsigned int vmid);
44
45static int execute_queues_cpsch(struct device_queue_manager *dqm,
46 enum kfd_unmap_queues_filter filter,
47 uint32_t filter_param);
48static int unmap_queues_cpsch(struct device_queue_manager *dqm,
49 enum kfd_unmap_queues_filter filter,
50 uint32_t filter_param);
51
52static int map_queues_cpsch(struct device_queue_manager *dqm);
53
54static void deallocate_sdma_queue(struct device_queue_manager *dqm,
55 struct queue *q);
56
57static inline void deallocate_hqd(struct device_queue_manager *dqm,
58 struct queue *q);
59static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
60static int allocate_sdma_queue(struct device_queue_manager *dqm,
61 struct queue *q);
62static void kfd_process_hw_exception(struct work_struct *work);
63
64static inline
65enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
66{
67 if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
68 return KFD_MQD_TYPE_SDMA;
69 return KFD_MQD_TYPE_CP;
70}
71
72static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
73{
74 int i;
75 int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
76 + pipe * dqm->dev->shared_resources.num_queue_per_pipe;
77
78 /* queue is available for KFD usage if bit is 1 */
79 for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
80 if (test_bit(pipe_offset + i,
81 dqm->dev->shared_resources.queue_bitmap))
82 return true;
83 return false;
84}
85
86unsigned int get_queues_num(struct device_queue_manager *dqm)
87{
88 return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
89 KGD_MAX_QUEUES);
90}
91
92unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
93{
94 return dqm->dev->shared_resources.num_queue_per_pipe;
95}
96
97unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
98{
99 return dqm->dev->shared_resources.num_pipe_per_mec;
100}
101
102static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
103{
104 return dqm->dev->device_info->num_sdma_engines;
105}
106
107static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
108{
109 return dqm->dev->device_info->num_xgmi_sdma_engines;
110}
111
112unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
113{
114 return dqm->dev->device_info->num_sdma_engines
115 * dqm->dev->device_info->num_sdma_queues_per_engine;
116}
117
118unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
119{
120 return dqm->dev->device_info->num_xgmi_sdma_engines
121 * dqm->dev->device_info->num_sdma_queues_per_engine;
122}
123
124void program_sh_mem_settings(struct device_queue_manager *dqm,
125 struct qcm_process_device *qpd)
126{
127 return dqm->dev->kfd2kgd->program_sh_mem_settings(
128 dqm->dev->kgd, qpd->vmid,
129 qpd->sh_mem_config,
130 qpd->sh_mem_ape1_base,
131 qpd->sh_mem_ape1_limit,
132 qpd->sh_mem_bases);
133}
134
135static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
136{
137 struct kfd_dev *dev = qpd->dqm->dev;
138
139 if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
140 /* On pre-SOC15 chips we need to use the queue ID to
141 * preserve the user mode ABI.
142 */
143 q->doorbell_id = q->properties.queue_id;
144 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
145 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
146 /* For SDMA queues on SOC15 with 8-byte doorbell, use static
147 * doorbell assignments based on the engine and queue id.
148 * The doobell index distance between RLC (2*i) and (2*i+1)
149 * for a SDMA engine is 512.
150 */
151 uint32_t *idx_offset =
152 dev->shared_resources.sdma_doorbell_idx;
153
154 q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
155 + (q->properties.sdma_queue_id & 1)
156 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
157 + (q->properties.sdma_queue_id >> 1);
158 } else {
159 /* For CP queues on SOC15 reserve a free doorbell ID */
160 unsigned int found;
161
162 found = find_first_zero_bit(qpd->doorbell_bitmap,
163 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
164 if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
165 pr_debug("No doorbells available");
166 return -EBUSY;
167 }
168 set_bit(found, qpd->doorbell_bitmap);
169 q->doorbell_id = found;
170 }
171
172 q->properties.doorbell_off =
173 kfd_doorbell_id_to_offset(dev, q->process,
174 q->doorbell_id);
175
176 return 0;
177}
178
179static void deallocate_doorbell(struct qcm_process_device *qpd,
180 struct queue *q)
181{
182 unsigned int old;
183 struct kfd_dev *dev = qpd->dqm->dev;
184
185 if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
186 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
187 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
188 return;
189
190 old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
191 WARN_ON(!old);
192}
193
194static int allocate_vmid(struct device_queue_manager *dqm,
195 struct qcm_process_device *qpd,
196 struct queue *q)
197{
198 int bit, allocated_vmid;
199
200 if (dqm->vmid_bitmap == 0)
201 return -ENOMEM;
202
203 bit = ffs(dqm->vmid_bitmap) - 1;
204 dqm->vmid_bitmap &= ~(1 << bit);
205
206 allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
207 pr_debug("vmid allocation %d\n", allocated_vmid);
208 qpd->vmid = allocated_vmid;
209 q->properties.vmid = allocated_vmid;
210
211 set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
212 program_sh_mem_settings(dqm, qpd);
213
214 /* qpd->page_table_base is set earlier when register_process()
215 * is called, i.e. when the first queue is created.
216 */
217 dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
218 qpd->vmid,
219 qpd->page_table_base);
220 /* invalidate the VM context after pasid and vmid mapping is set up */
221 kfd_flush_tlb(qpd_to_pdd(qpd));
222
223 dqm->dev->kfd2kgd->set_scratch_backing_va(
224 dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
225
226 return 0;
227}
228
229static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
230 struct qcm_process_device *qpd)
231{
232 const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
233 int ret;
234
235 if (!qpd->ib_kaddr)
236 return -ENOMEM;
237
238 ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
239 if (ret)
240 return ret;
241
242 return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
243 qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
244 pmf->release_mem_size / sizeof(uint32_t));
245}
246
247static void deallocate_vmid(struct device_queue_manager *dqm,
248 struct qcm_process_device *qpd,
249 struct queue *q)
250{
251 int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;
252
253 /* On GFX v7, CP doesn't flush TC at dequeue */
254 if (q->device->device_info->asic_family == CHIP_HAWAII)
255 if (flush_texture_cache_nocpsch(q->device, qpd))
256 pr_err("Failed to flush TC\n");
257
258 kfd_flush_tlb(qpd_to_pdd(qpd));
259
260 /* Release the vmid mapping */
261 set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
262
263 dqm->vmid_bitmap |= (1 << bit);
264 qpd->vmid = 0;
265 q->properties.vmid = 0;
266}
267
268static int create_queue_nocpsch(struct device_queue_manager *dqm,
269 struct queue *q,
270 struct qcm_process_device *qpd)
271{
272 struct mqd_manager *mqd_mgr;
273 int retval;
274
275 print_queue(q);
276
277 dqm_lock(dqm);
278
279 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
280 pr_warn("Can't create new usermode queue because %d queues were already created\n",
281 dqm->total_queue_count);
282 retval = -EPERM;
283 goto out_unlock;
284 }
285
286 if (list_empty(&qpd->queues_list)) {
287 retval = allocate_vmid(dqm, qpd, q);
288 if (retval)
289 goto out_unlock;
290 }
291 q->properties.vmid = qpd->vmid;
292 /*
293 * Eviction state logic: mark all queues as evicted, even ones
294 * not currently active. Restoring inactive queues later only
295 * updates the is_evicted flag but is a no-op otherwise.
296 */
297 q->properties.is_evicted = !!qpd->evicted;
298
299 q->properties.tba_addr = qpd->tba_addr;
300 q->properties.tma_addr = qpd->tma_addr;
301
302 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
303 q->properties.type)];
304 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
305 retval = allocate_hqd(dqm, q);
306 if (retval)
307 goto deallocate_vmid;
308 pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
309 q->pipe, q->queue);
310 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
311 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
312 retval = allocate_sdma_queue(dqm, q);
313 if (retval)
314 goto deallocate_vmid;
315 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
316 }
317
318 retval = allocate_doorbell(qpd, q);
319 if (retval)
320 goto out_deallocate_hqd;
321
322 /* Temporarily release dqm lock to avoid a circular lock dependency */
323 dqm_unlock(dqm);
324 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
325 dqm_lock(dqm);
326
327 if (!q->mqd_mem_obj) {
328 retval = -ENOMEM;
329 goto out_deallocate_doorbell;
330 }
331 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
332 &q->gart_mqd_addr, &q->properties);
333 if (q->properties.is_active) {
334
335 if (WARN(q->process->mm != current->mm,
336 "should only run in user thread"))
337 retval = -EFAULT;
338 else
339 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
340 q->queue, &q->properties, current->mm);
341 if (retval)
342 goto out_free_mqd;
343 }
344
345 list_add(&q->list, &qpd->queues_list);
346 qpd->queue_count++;
347 if (q->properties.is_active)
348 dqm->queue_count++;
349
350 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
351 dqm->sdma_queue_count++;
352 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
353 dqm->xgmi_sdma_queue_count++;
354
355 /*
356 * Unconditionally increment this counter, regardless of the queue's
357 * type or whether the queue is active.
358 */
359 dqm->total_queue_count++;
360 pr_debug("Total of %d queues are accountable so far\n",
361 dqm->total_queue_count);
362 goto out_unlock;
363
364out_free_mqd:
365 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
366out_deallocate_doorbell:
367 deallocate_doorbell(qpd, q);
368out_deallocate_hqd:
369 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
370 deallocate_hqd(dqm, q);
371 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
372 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
373 deallocate_sdma_queue(dqm, q);
374deallocate_vmid:
375 if (list_empty(&qpd->queues_list))
376 deallocate_vmid(dqm, qpd, q);
377out_unlock:
378 dqm_unlock(dqm);
379 return retval;
380}
381
382static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
383{
384 bool set;
385 int pipe, bit, i;
386
387 set = false;
388
389 for (pipe = dqm->next_pipe_to_allocate, i = 0;
390 i < get_pipes_per_mec(dqm);
391 pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
392
393 if (!is_pipe_enabled(dqm, 0, pipe))
394 continue;
395
396 if (dqm->allocated_queues[pipe] != 0) {
397 bit = ffs(dqm->allocated_queues[pipe]) - 1;
398 dqm->allocated_queues[pipe] &= ~(1 << bit);
399 q->pipe = pipe;
400 q->queue = bit;
401 set = true;
402 break;
403 }
404 }
405
406 if (!set)
407 return -EBUSY;
408
409 pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
410 /* horizontal hqd allocation */
411 dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
412
413 return 0;
414}
415
416static inline void deallocate_hqd(struct device_queue_manager *dqm,
417 struct queue *q)
418{
419 dqm->allocated_queues[q->pipe] |= (1 << q->queue);
420}
421
422/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
423 * to avoid asynchronized access
424 */
425static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
426 struct qcm_process_device *qpd,
427 struct queue *q)
428{
429 int retval;
430 struct mqd_manager *mqd_mgr;
431
432 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
433 q->properties.type)];
434
435 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
436 deallocate_hqd(dqm, q);
437 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
438 dqm->sdma_queue_count--;
439 deallocate_sdma_queue(dqm, q);
440 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
441 dqm->xgmi_sdma_queue_count--;
442 deallocate_sdma_queue(dqm, q);
443 } else {
444 pr_debug("q->properties.type %d is invalid\n",
445 q->properties.type);
446 return -EINVAL;
447 }
448 dqm->total_queue_count--;
449
450 deallocate_doorbell(qpd, q);
451
452 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
453 KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
454 KFD_UNMAP_LATENCY_MS,
455 q->pipe, q->queue);
456 if (retval == -ETIME)
457 qpd->reset_wavefronts = true;
458
459 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
460
461 list_del(&q->list);
462 if (list_empty(&qpd->queues_list)) {
463 if (qpd->reset_wavefronts) {
464 pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
465 dqm->dev);
466 /* dbgdev_wave_reset_wavefronts has to be called before
467 * deallocate_vmid(), i.e. when vmid is still in use.
468 */
469 dbgdev_wave_reset_wavefronts(dqm->dev,
470 qpd->pqm->process);
471 qpd->reset_wavefronts = false;
472 }
473
474 deallocate_vmid(dqm, qpd, q);
475 }
476 qpd->queue_count--;
477 if (q->properties.is_active)
478 dqm->queue_count--;
479
480 return retval;
481}
482
483static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
484 struct qcm_process_device *qpd,
485 struct queue *q)
486{
487 int retval;
488
489 dqm_lock(dqm);
490 retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
491 dqm_unlock(dqm);
492
493 return retval;
494}
495
496static int update_queue(struct device_queue_manager *dqm, struct queue *q)
497{
498 int retval = 0;
499 struct mqd_manager *mqd_mgr;
500 struct kfd_process_device *pdd;
501 bool prev_active = false;
502
503 dqm_lock(dqm);
504 pdd = kfd_get_process_device_data(q->device, q->process);
505 if (!pdd) {
506 retval = -ENODEV;
507 goto out_unlock;
508 }
509 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
510 q->properties.type)];
511
512 /* Save previous activity state for counters */
513 prev_active = q->properties.is_active;
514
515 /* Make sure the queue is unmapped before updating the MQD */
516 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
517 retval = unmap_queues_cpsch(dqm,
518 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
519 if (retval) {
520 pr_err("unmap queue failed\n");
521 goto out_unlock;
522 }
523 } else if (prev_active &&
524 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
525 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
526 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
527 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
528 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
529 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
530 if (retval) {
531 pr_err("destroy mqd failed\n");
532 goto out_unlock;
533 }
534 }
535
536 mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
537
538 /*
539 * check active state vs. the previous state and modify
540 * counter accordingly. map_queues_cpsch uses the
541 * dqm->queue_count to determine whether a new runlist must be
542 * uploaded.
543 */
544 if (q->properties.is_active && !prev_active)
545 dqm->queue_count++;
546 else if (!q->properties.is_active && prev_active)
547 dqm->queue_count--;
548
549 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
550 retval = map_queues_cpsch(dqm);
551 else if (q->properties.is_active &&
552 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
553 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
554 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
555 if (WARN(q->process->mm != current->mm,
556 "should only run in user thread"))
557 retval = -EFAULT;
558 else
559 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
560 q->pipe, q->queue,
561 &q->properties, current->mm);
562 }
563
564out_unlock:
565 dqm_unlock(dqm);
566 return retval;
567}
568
569static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
570 struct qcm_process_device *qpd)
571{
572 struct queue *q;
573 struct mqd_manager *mqd_mgr;
574 struct kfd_process_device *pdd;
575 int retval, ret = 0;
576
577 dqm_lock(dqm);
578 if (qpd->evicted++ > 0) /* already evicted, do nothing */
579 goto out;
580
581 pdd = qpd_to_pdd(qpd);
582 pr_info_ratelimited("Evicting PASID %u queues\n",
583 pdd->process->pasid);
584
585 /* Mark all queues as evicted. Deactivate all active queues on
586 * the qpd.
587 */
588 list_for_each_entry(q, &qpd->queues_list, list) {
589 q->properties.is_evicted = true;
590 if (!q->properties.is_active)
591 continue;
592
593 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
594 q->properties.type)];
595 q->properties.is_active = false;
596 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
597 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
598 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
599 if (retval && !ret)
600 /* Return the first error, but keep going to
601 * maintain a consistent eviction state
602 */
603 ret = retval;
604 dqm->queue_count--;
605 }
606
607out:
608 dqm_unlock(dqm);
609 return ret;
610}
611
612static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
613 struct qcm_process_device *qpd)
614{
615 struct queue *q;
616 struct kfd_process_device *pdd;
617 int retval = 0;
618
619 dqm_lock(dqm);
620 if (qpd->evicted++ > 0) /* already evicted, do nothing */
621 goto out;
622
623 pdd = qpd_to_pdd(qpd);
624 pr_info_ratelimited("Evicting PASID %u queues\n",
625 pdd->process->pasid);
626
627 /* Mark all queues as evicted. Deactivate all active queues on
628 * the qpd.
629 */
630 list_for_each_entry(q, &qpd->queues_list, list) {
631 q->properties.is_evicted = true;
632 if (!q->properties.is_active)
633 continue;
634
635 q->properties.is_active = false;
636 dqm->queue_count--;
637 }
638 retval = execute_queues_cpsch(dqm,
639 qpd->is_debug ?
640 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
641 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
642
643out:
644 dqm_unlock(dqm);
645 return retval;
646}
647
648static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
649 struct qcm_process_device *qpd)
650{
651 struct mm_struct *mm = NULL;
652 struct queue *q;
653 struct mqd_manager *mqd_mgr;
654 struct kfd_process_device *pdd;
655 uint64_t pd_base;
656 int retval, ret = 0;
657
658 pdd = qpd_to_pdd(qpd);
659 /* Retrieve PD base */
660 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
661
662 dqm_lock(dqm);
663 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
664 goto out;
665 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
666 qpd->evicted--;
667 goto out;
668 }
669
670 pr_info_ratelimited("Restoring PASID %u queues\n",
671 pdd->process->pasid);
672
673 /* Update PD Base in QPD */
674 qpd->page_table_base = pd_base;
675 pr_debug("Updated PD address to 0x%llx\n", pd_base);
676
677 if (!list_empty(&qpd->queues_list)) {
678 dqm->dev->kfd2kgd->set_vm_context_page_table_base(
679 dqm->dev->kgd,
680 qpd->vmid,
681 qpd->page_table_base);
682 kfd_flush_tlb(pdd);
683 }
684
685 /* Take a safe reference to the mm_struct, which may otherwise
686 * disappear even while the kfd_process is still referenced.
687 */
688 mm = get_task_mm(pdd->process->lead_thread);
689 if (!mm) {
690 ret = -EFAULT;
691 goto out;
692 }
693
694 /* Remove the eviction flags. Activate queues that are not
695 * inactive for other reasons.
696 */
697 list_for_each_entry(q, &qpd->queues_list, list) {
698 q->properties.is_evicted = false;
699 if (!QUEUE_IS_ACTIVE(q->properties))
700 continue;
701
702 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
703 q->properties.type)];
704 q->properties.is_active = true;
705 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
706 q->queue, &q->properties, mm);
707 if (retval && !ret)
708 /* Return the first error, but keep going to
709 * maintain a consistent eviction state
710 */
711 ret = retval;
712 dqm->queue_count++;
713 }
714 qpd->evicted = 0;
715out:
716 if (mm)
717 mmput(mm);
718 dqm_unlock(dqm);
719 return ret;
720}
721
722static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
723 struct qcm_process_device *qpd)
724{
725 struct queue *q;
726 struct kfd_process_device *pdd;
727 uint64_t pd_base;
728 int retval = 0;
729
730 pdd = qpd_to_pdd(qpd);
731 /* Retrieve PD base */
732 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
733
734 dqm_lock(dqm);
735 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
736 goto out;
737 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
738 qpd->evicted--;
739 goto out;
740 }
741
742 pr_info_ratelimited("Restoring PASID %u queues\n",
743 pdd->process->pasid);
744
745 /* Update PD Base in QPD */
746 qpd->page_table_base = pd_base;
747 pr_debug("Updated PD address to 0x%llx\n", pd_base);
748
749 /* activate all active queues on the qpd */
750 list_for_each_entry(q, &qpd->queues_list, list) {
751 q->properties.is_evicted = false;
752 if (!QUEUE_IS_ACTIVE(q->properties))
753 continue;
754
755 q->properties.is_active = true;
756 dqm->queue_count++;
757 }
758 retval = execute_queues_cpsch(dqm,
759 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
760 qpd->evicted = 0;
761out:
762 dqm_unlock(dqm);
763 return retval;
764}
765
766static int register_process(struct device_queue_manager *dqm,
767 struct qcm_process_device *qpd)
768{
769 struct device_process_node *n;
770 struct kfd_process_device *pdd;
771 uint64_t pd_base;
772 int retval;
773
774 n = kzalloc(sizeof(*n), GFP_KERNEL);
775 if (!n)
776 return -ENOMEM;
777
778 n->qpd = qpd;
779
780 pdd = qpd_to_pdd(qpd);
781 /* Retrieve PD base */
782 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
783
784 dqm_lock(dqm);
785 list_add(&n->list, &dqm->queues);
786
787 /* Update PD Base in QPD */
788 qpd->page_table_base = pd_base;
789 pr_debug("Updated PD address to 0x%llx\n", pd_base);
790
791 retval = dqm->asic_ops.update_qpd(dqm, qpd);
792
793 dqm->processes_count++;
794
795 dqm_unlock(dqm);
796
797 /* Outside the DQM lock because under the DQM lock we can't do
798 * reclaim or take other locks that others hold while reclaiming.
799 */
800 kfd_inc_compute_active(dqm->dev);
801
802 return retval;
803}
804
805static int unregister_process(struct device_queue_manager *dqm,
806 struct qcm_process_device *qpd)
807{
808 int retval;
809 struct device_process_node *cur, *next;
810
811 pr_debug("qpd->queues_list is %s\n",
812 list_empty(&qpd->queues_list) ? "empty" : "not empty");
813
814 retval = 0;
815 dqm_lock(dqm);
816
817 list_for_each_entry_safe(cur, next, &dqm->queues, list) {
818 if (qpd == cur->qpd) {
819 list_del(&cur->list);
820 kfree(cur);
821 dqm->processes_count--;
822 goto out;
823 }
824 }
825 /* qpd not found in dqm list */
826 retval = 1;
827out:
828 dqm_unlock(dqm);
829
830 /* Outside the DQM lock because under the DQM lock we can't do
831 * reclaim or take other locks that others hold while reclaiming.
832 */
833 if (!retval)
834 kfd_dec_compute_active(dqm->dev);
835
836 return retval;
837}
838
839static int
840set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
841 unsigned int vmid)
842{
843 return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
844 dqm->dev->kgd, pasid, vmid);
845}
846
847static void init_interrupts(struct device_queue_manager *dqm)
848{
849 unsigned int i;
850
851 for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
852 if (is_pipe_enabled(dqm, 0, i))
853 dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
854}
855
856static int initialize_nocpsch(struct device_queue_manager *dqm)
857{
858 int pipe, queue;
859
860 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
861
862 dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
863 sizeof(unsigned int), GFP_KERNEL);
864 if (!dqm->allocated_queues)
865 return -ENOMEM;
866
867 mutex_init(&dqm->lock_hidden);
868 INIT_LIST_HEAD(&dqm->queues);
869 dqm->queue_count = dqm->next_pipe_to_allocate = 0;
870 dqm->sdma_queue_count = 0;
871 dqm->xgmi_sdma_queue_count = 0;
872
873 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
874 int pipe_offset = pipe * get_queues_per_pipe(dqm);
875
876 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
877 if (test_bit(pipe_offset + queue,
878 dqm->dev->shared_resources.queue_bitmap))
879 dqm->allocated_queues[pipe] |= 1 << queue;
880 }
881
882 dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
883 dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
884 dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
885
886 return 0;
887}
888
889static void uninitialize(struct device_queue_manager *dqm)
890{
891 int i;
892
893 WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
894
895 kfree(dqm->allocated_queues);
896 for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
897 kfree(dqm->mqd_mgrs[i]);
898 mutex_destroy(&dqm->lock_hidden);
899 kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
900}
901
902static int start_nocpsch(struct device_queue_manager *dqm)
903{
904 init_interrupts(dqm);
905 return pm_init(&dqm->packets, dqm);
906}
907
908static int stop_nocpsch(struct device_queue_manager *dqm)
909{
910 pm_uninit(&dqm->packets);
911 return 0;
912}
913
914static int allocate_sdma_queue(struct device_queue_manager *dqm,
915 struct queue *q)
916{
917 int bit;
918
919 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
920 if (dqm->sdma_bitmap == 0)
921 return -ENOMEM;
922 bit = __ffs64(dqm->sdma_bitmap);
923 dqm->sdma_bitmap &= ~(1ULL << bit);
924 q->sdma_id = bit;
925 q->properties.sdma_engine_id = q->sdma_id %
926 get_num_sdma_engines(dqm);
927 q->properties.sdma_queue_id = q->sdma_id /
928 get_num_sdma_engines(dqm);
929 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
930 if (dqm->xgmi_sdma_bitmap == 0)
931 return -ENOMEM;
932 bit = __ffs64(dqm->xgmi_sdma_bitmap);
933 dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
934 q->sdma_id = bit;
935 /* sdma_engine_id is sdma id including
936 * both PCIe-optimized SDMAs and XGMI-
937 * optimized SDMAs. The calculation below
938 * assumes the first N engines are always
939 * PCIe-optimized ones
940 */
941 q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
942 q->sdma_id % get_num_xgmi_sdma_engines(dqm);
943 q->properties.sdma_queue_id = q->sdma_id /
944 get_num_xgmi_sdma_engines(dqm);
945 }
946
947 pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
948 pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
949
950 return 0;
951}
952
953static void deallocate_sdma_queue(struct device_queue_manager *dqm,
954 struct queue *q)
955{
956 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
957 if (q->sdma_id >= get_num_sdma_queues(dqm))
958 return;
959 dqm->sdma_bitmap |= (1ULL << q->sdma_id);
960 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
961 if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
962 return;
963 dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
964 }
965}
966
967/*
968 * Device Queue Manager implementation for cp scheduler
969 */
970
971static int set_sched_resources(struct device_queue_manager *dqm)
972{
973 int i, mec;
974 struct scheduling_resources res;
975
976 res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
977
978 res.queue_mask = 0;
979 for (i = 0; i < KGD_MAX_QUEUES; ++i) {
980 mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
981 / dqm->dev->shared_resources.num_pipe_per_mec;
982
983 if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
984 continue;
985
986 /* only acquire queues from the first MEC */
987 if (mec > 0)
988 continue;
989
990 /* This situation may be hit in the future if a new HW
991 * generation exposes more than 64 queues. If so, the
992 * definition of res.queue_mask needs updating
993 */
994 if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
995 pr_err("Invalid queue enabled by amdgpu: %d\n", i);
996 break;
997 }
998
999 res.queue_mask |= (1ull << i);
1000 }
1001 res.gws_mask = ~0ull;
1002 res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
1003
1004 pr_debug("Scheduling resources:\n"
1005 "vmid mask: 0x%8X\n"
1006 "queue mask: 0x%8llX\n",
1007 res.vmid_mask, res.queue_mask);
1008
1009 return pm_send_set_resources(&dqm->packets, &res);
1010}
1011
1012static int initialize_cpsch(struct device_queue_manager *dqm)
1013{
1014 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
1015
1016 mutex_init(&dqm->lock_hidden);
1017 INIT_LIST_HEAD(&dqm->queues);
1018 dqm->queue_count = dqm->processes_count = 0;
1019 dqm->sdma_queue_count = 0;
1020 dqm->xgmi_sdma_queue_count = 0;
1021 dqm->active_runlist = false;
1022 dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
1023 dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
1024
1025 INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
1026
1027 return 0;
1028}
1029
1030static int start_cpsch(struct device_queue_manager *dqm)
1031{
1032 int retval;
1033
1034 retval = 0;
1035
1036 retval = pm_init(&dqm->packets, dqm);
1037 if (retval)
1038 goto fail_packet_manager_init;
1039
1040 retval = set_sched_resources(dqm);
1041 if (retval)
1042 goto fail_set_sched_resources;
1043
1044 pr_debug("Allocating fence memory\n");
1045
1046 /* allocate fence memory on the gart */
1047 retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
1048 &dqm->fence_mem);
1049
1050 if (retval)
1051 goto fail_allocate_vidmem;
1052
1053 dqm->fence_addr = dqm->fence_mem->cpu_ptr;
1054 dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
1055
1056 init_interrupts(dqm);
1057
1058 dqm_lock(dqm);
1059 /* clear hang status when driver try to start the hw scheduler */
1060 dqm->is_hws_hang = false;
1061 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1062 dqm_unlock(dqm);
1063
1064 return 0;
1065fail_allocate_vidmem:
1066fail_set_sched_resources:
1067 pm_uninit(&dqm->packets);
1068fail_packet_manager_init:
1069 return retval;
1070}
1071
1072static int stop_cpsch(struct device_queue_manager *dqm)
1073{
1074 dqm_lock(dqm);
1075 unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
1076 dqm_unlock(dqm);
1077
1078 kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
1079 pm_uninit(&dqm->packets);
1080
1081 return 0;
1082}
1083
1084static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
1085 struct kernel_queue *kq,
1086 struct qcm_process_device *qpd)
1087{
1088 dqm_lock(dqm);
1089 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1090 pr_warn("Can't create new kernel queue because %d queues were already created\n",
1091 dqm->total_queue_count);
1092 dqm_unlock(dqm);
1093 return -EPERM;
1094 }
1095
1096 /*
1097 * Unconditionally increment this counter, regardless of the queue's
1098 * type or whether the queue is active.
1099 */
1100 dqm->total_queue_count++;
1101 pr_debug("Total of %d queues are accountable so far\n",
1102 dqm->total_queue_count);
1103
1104 list_add(&kq->list, &qpd->priv_queue_list);
1105 dqm->queue_count++;
1106 qpd->is_debug = true;
1107 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1108 dqm_unlock(dqm);
1109
1110 return 0;
1111}
1112
1113static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
1114 struct kernel_queue *kq,
1115 struct qcm_process_device *qpd)
1116{
1117 dqm_lock(dqm);
1118 list_del(&kq->list);
1119 dqm->queue_count--;
1120 qpd->is_debug = false;
1121 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
1122 /*
1123 * Unconditionally decrement this counter, regardless of the queue's
1124 * type.
1125 */
1126 dqm->total_queue_count--;
1127 pr_debug("Total of %d queues are accountable so far\n",
1128 dqm->total_queue_count);
1129 dqm_unlock(dqm);
1130}
1131
1132static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
1133 struct qcm_process_device *qpd)
1134{
1135 int retval;
1136 struct mqd_manager *mqd_mgr;
1137
1138 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1139 pr_warn("Can't create new usermode queue because %d queues were already created\n",
1140 dqm->total_queue_count);
1141 retval = -EPERM;
1142 goto out;
1143 }
1144
1145 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1146 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1147 dqm_lock(dqm);
1148 retval = allocate_sdma_queue(dqm, q);
1149 dqm_unlock(dqm);
1150 if (retval)
1151 goto out;
1152 }
1153
1154 retval = allocate_doorbell(qpd, q);
1155 if (retval)
1156 goto out_deallocate_sdma_queue;
1157
1158 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1159 q->properties.type)];
1160
1161 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1162 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1163 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
1164 q->properties.tba_addr = qpd->tba_addr;
1165 q->properties.tma_addr = qpd->tma_addr;
1166 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
1167 if (!q->mqd_mem_obj) {
1168 retval = -ENOMEM;
1169 goto out_deallocate_doorbell;
1170 }
1171
1172 dqm_lock(dqm);
1173 /*
1174 * Eviction state logic: mark all queues as evicted, even ones
1175 * not currently active. Restoring inactive queues later only
1176 * updates the is_evicted flag but is a no-op otherwise.
1177 */
1178 q->properties.is_evicted = !!qpd->evicted;
1179 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
1180 &q->gart_mqd_addr, &q->properties);
1181
1182 list_add(&q->list, &qpd->queues_list);
1183 qpd->queue_count++;
1184 if (q->properties.is_active) {
1185 dqm->queue_count++;
1186 retval = execute_queues_cpsch(dqm,
1187 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1188 }
1189
1190 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
1191 dqm->sdma_queue_count++;
1192 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1193 dqm->xgmi_sdma_queue_count++;
1194 /*
1195 * Unconditionally increment this counter, regardless of the queue's
1196 * type or whether the queue is active.
1197 */
1198 dqm->total_queue_count++;
1199
1200 pr_debug("Total of %d queues are accountable so far\n",
1201 dqm->total_queue_count);
1202
1203 dqm_unlock(dqm);
1204 return retval;
1205
1206out_deallocate_doorbell:
1207 deallocate_doorbell(qpd, q);
1208out_deallocate_sdma_queue:
1209 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1210 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1211 dqm_lock(dqm);
1212 deallocate_sdma_queue(dqm, q);
1213 dqm_unlock(dqm);
1214 }
1215out:
1216 return retval;
1217}
1218
1219int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
1220 unsigned int fence_value,
1221 unsigned int timeout_ms)
1222{
1223 unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
1224
1225 while (*fence_addr != fence_value) {
1226 if (time_after(jiffies, end_jiffies)) {
1227 pr_err("qcm fence wait loop timeout expired\n");
1228 /* In HWS case, this is used to halt the driver thread
1229 * in order not to mess up CP states before doing
1230 * scandumps for FW debugging.
1231 */
1232 while (halt_if_hws_hang)
1233 schedule();
1234
1235 return -ETIME;
1236 }
1237 schedule();
1238 }
1239
1240 return 0;
1241}
1242
1243static int unmap_sdma_queues(struct device_queue_manager *dqm)
1244{
1245 int i, retval = 0;
1246
1247 for (i = 0; i < dqm->dev->device_info->num_sdma_engines +
1248 dqm->dev->device_info->num_xgmi_sdma_engines; i++) {
1249 retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
1250 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
1251 if (retval)
1252 return retval;
1253 }
1254 return retval;
1255}
1256
1257/* dqm->lock mutex has to be locked before calling this function */
1258static int map_queues_cpsch(struct device_queue_manager *dqm)
1259{
1260 int retval;
1261
1262 if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
1263 return 0;
1264
1265 if (dqm->active_runlist)
1266 return 0;
1267
1268 retval = pm_send_runlist(&dqm->packets, &dqm->queues);
1269 pr_debug("%s sent runlist\n", __func__);
1270 if (retval) {
1271 pr_err("failed to execute runlist\n");
1272 return retval;
1273 }
1274 dqm->active_runlist = true;
1275
1276 return retval;
1277}
1278
1279/* dqm->lock mutex has to be locked before calling this function */
1280static int unmap_queues_cpsch(struct device_queue_manager *dqm,
1281 enum kfd_unmap_queues_filter filter,
1282 uint32_t filter_param)
1283{
1284 int retval = 0;
1285
1286 if (dqm->is_hws_hang)
1287 return -EIO;
1288 if (!dqm->active_runlist)
1289 return retval;
1290
1291 pr_debug("Before destroying queues, sdma queue count is : %u, xgmi sdma queue count is : %u\n",
1292 dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);
1293
1294 if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
1295 unmap_sdma_queues(dqm);
1296
1297 retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
1298 filter, filter_param, false, 0);
1299 if (retval)
1300 return retval;
1301
1302 *dqm->fence_addr = KFD_FENCE_INIT;
1303 pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
1304 KFD_FENCE_COMPLETED);
1305 /* should be timed out */
1306 retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
1307 queue_preemption_timeout_ms);
1308 if (retval)
1309 return retval;
1310
1311 pm_release_ib(&dqm->packets);
1312 dqm->active_runlist = false;
1313
1314 return retval;
1315}
1316
1317/* dqm->lock mutex has to be locked before calling this function */
1318static int execute_queues_cpsch(struct device_queue_manager *dqm,
1319 enum kfd_unmap_queues_filter filter,
1320 uint32_t filter_param)
1321{
1322 int retval;
1323
1324 if (dqm->is_hws_hang)
1325 return -EIO;
1326 retval = unmap_queues_cpsch(dqm, filter, filter_param);
1327 if (retval) {
1328 pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
1329 dqm->is_hws_hang = true;
1330 schedule_work(&dqm->hw_exception_work);
1331 return retval;
1332 }
1333
1334 return map_queues_cpsch(dqm);
1335}
1336
1337static int destroy_queue_cpsch(struct device_queue_manager *dqm,
1338 struct qcm_process_device *qpd,
1339 struct queue *q)
1340{
1341 int retval;
1342 struct mqd_manager *mqd_mgr;
1343
1344 retval = 0;
1345
1346 /* remove queue from list to prevent rescheduling after preemption */
1347 dqm_lock(dqm);
1348
1349 if (qpd->is_debug) {
1350 /*
1351 * error, currently we do not allow to destroy a queue
1352 * of a currently debugged process
1353 */
1354 retval = -EBUSY;
1355 goto failed_try_destroy_debugged_queue;
1356
1357 }
1358
1359 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1360 q->properties.type)];
1361
1362 deallocate_doorbell(qpd, q);
1363
1364 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1365 dqm->sdma_queue_count--;
1366 deallocate_sdma_queue(dqm, q);
1367 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1368 dqm->xgmi_sdma_queue_count--;
1369 deallocate_sdma_queue(dqm, q);
1370 }
1371
1372 list_del(&q->list);
1373 qpd->queue_count--;
1374 if (q->properties.is_active) {
1375 dqm->queue_count--;
1376 retval = execute_queues_cpsch(dqm,
1377 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1378 if (retval == -ETIME)
1379 qpd->reset_wavefronts = true;
1380 }
1381
1382 /*
1383 * Unconditionally decrement this counter, regardless of the queue's
1384 * type
1385 */
1386 dqm->total_queue_count--;
1387 pr_debug("Total of %d queues are accountable so far\n",
1388 dqm->total_queue_count);
1389
1390 dqm_unlock(dqm);
1391
1392 /* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
1393 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1394
1395 return retval;
1396
1397failed_try_destroy_debugged_queue:
1398
1399 dqm_unlock(dqm);
1400 return retval;
1401}
1402
1403/*
1404 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
1405 * stay in user mode.
1406 */
1407#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
1408/* APE1 limit is inclusive and 64K aligned. */
1409#define APE1_LIMIT_ALIGNMENT 0xFFFF
1410
1411static bool set_cache_memory_policy(struct device_queue_manager *dqm,
1412 struct qcm_process_device *qpd,
1413 enum cache_policy default_policy,
1414 enum cache_policy alternate_policy,
1415 void __user *alternate_aperture_base,
1416 uint64_t alternate_aperture_size)
1417{
1418 bool retval = true;
1419
1420 if (!dqm->asic_ops.set_cache_memory_policy)
1421 return retval;
1422
1423 dqm_lock(dqm);
1424
1425 if (alternate_aperture_size == 0) {
1426 /* base > limit disables APE1 */
1427 qpd->sh_mem_ape1_base = 1;
1428 qpd->sh_mem_ape1_limit = 0;
1429 } else {
1430 /*
1431 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
1432 * SH_MEM_APE1_BASE[31:0], 0x0000 }
1433 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
1434 * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
1435 * Verify that the base and size parameters can be
1436 * represented in this format and convert them.
1437 * Additionally restrict APE1 to user-mode addresses.
1438 */
1439
1440 uint64_t base = (uintptr_t)alternate_aperture_base;
1441 uint64_t limit = base + alternate_aperture_size - 1;
1442
1443 if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
1444 (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
1445 retval = false;
1446 goto out;
1447 }
1448
1449 qpd->sh_mem_ape1_base = base >> 16;
1450 qpd->sh_mem_ape1_limit = limit >> 16;
1451 }
1452
1453 retval = dqm->asic_ops.set_cache_memory_policy(
1454 dqm,
1455 qpd,
1456 default_policy,
1457 alternate_policy,
1458 alternate_aperture_base,
1459 alternate_aperture_size);
1460
1461 if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
1462 program_sh_mem_settings(dqm, qpd);
1463
1464 pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
1465 qpd->sh_mem_config, qpd->sh_mem_ape1_base,
1466 qpd->sh_mem_ape1_limit);
1467
1468out:
1469 dqm_unlock(dqm);
1470 return retval;
1471}
1472
1473static int set_trap_handler(struct device_queue_manager *dqm,
1474 struct qcm_process_device *qpd,
1475 uint64_t tba_addr,
1476 uint64_t tma_addr)
1477{
1478 uint64_t *tma;
1479
1480 if (dqm->dev->cwsr_enabled) {
1481 /* Jump from CWSR trap handler to user trap */
1482 tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1483 tma[0] = tba_addr;
1484 tma[1] = tma_addr;
1485 } else {
1486 qpd->tba_addr = tba_addr;
1487 qpd->tma_addr = tma_addr;
1488 }
1489
1490 return 0;
1491}
1492
1493static int process_termination_nocpsch(struct device_queue_manager *dqm,
1494 struct qcm_process_device *qpd)
1495{
1496 struct queue *q, *next;
1497 struct device_process_node *cur, *next_dpn;
1498 int retval = 0;
1499 bool found = false;
1500
1501 dqm_lock(dqm);
1502
1503 /* Clear all user mode queues */
1504 list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
1505 int ret;
1506
1507 ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
1508 if (ret)
1509 retval = ret;
1510 }
1511
1512 /* Unregister process */
1513 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1514 if (qpd == cur->qpd) {
1515 list_del(&cur->list);
1516 kfree(cur);
1517 dqm->processes_count--;
1518 found = true;
1519 break;
1520 }
1521 }
1522
1523 dqm_unlock(dqm);
1524
1525 /* Outside the DQM lock because under the DQM lock we can't do
1526 * reclaim or take other locks that others hold while reclaiming.
1527 */
1528 if (found)
1529 kfd_dec_compute_active(dqm->dev);
1530
1531 return retval;
1532}
1533
1534static int get_wave_state(struct device_queue_manager *dqm,
1535 struct queue *q,
1536 void __user *ctl_stack,
1537 u32 *ctl_stack_used_size,
1538 u32 *save_area_used_size)
1539{
1540 struct mqd_manager *mqd_mgr;
1541 int r;
1542
1543 dqm_lock(dqm);
1544
1545 if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
1546 q->properties.is_active || !q->device->cwsr_enabled) {
1547 r = -EINVAL;
1548 goto dqm_unlock;
1549 }
1550
1551 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_COMPUTE];
1552
1553 if (!mqd_mgr->get_wave_state) {
1554 r = -EINVAL;
1555 goto dqm_unlock;
1556 }
1557
1558 r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
1559 ctl_stack_used_size, save_area_used_size);
1560
1561dqm_unlock:
1562 dqm_unlock(dqm);
1563 return r;
1564}
1565
1566static int process_termination_cpsch(struct device_queue_manager *dqm,
1567 struct qcm_process_device *qpd)
1568{
1569 int retval;
1570 struct queue *q, *next;
1571 struct kernel_queue *kq, *kq_next;
1572 struct mqd_manager *mqd_mgr;
1573 struct device_process_node *cur, *next_dpn;
1574 enum kfd_unmap_queues_filter filter =
1575 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
1576 bool found = false;
1577
1578 retval = 0;
1579
1580 dqm_lock(dqm);
1581
1582 /* Clean all kernel queues */
1583 list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
1584 list_del(&kq->list);
1585 dqm->queue_count--;
1586 qpd->is_debug = false;
1587 dqm->total_queue_count--;
1588 filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
1589 }
1590
1591 /* Clear all user mode queues */
1592 list_for_each_entry(q, &qpd->queues_list, list) {
1593 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1594 dqm->sdma_queue_count--;
1595 deallocate_sdma_queue(dqm, q);
1596 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1597 dqm->xgmi_sdma_queue_count--;
1598 deallocate_sdma_queue(dqm, q);
1599 }
1600
1601 if (q->properties.is_active)
1602 dqm->queue_count--;
1603
1604 dqm->total_queue_count--;
1605 }
1606
1607 /* Unregister process */
1608 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1609 if (qpd == cur->qpd) {
1610 list_del(&cur->list);
1611 kfree(cur);
1612 dqm->processes_count--;
1613 found = true;
1614 break;
1615 }
1616 }
1617
1618 retval = execute_queues_cpsch(dqm, filter, 0);
1619 if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
1620 pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
1621 dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
1622 qpd->reset_wavefronts = false;
1623 }
1624
1625 dqm_unlock(dqm);
1626
1627 /* Outside the DQM lock because under the DQM lock we can't do
1628 * reclaim or take other locks that others hold while reclaiming.
1629 */
1630 if (found)
1631 kfd_dec_compute_active(dqm->dev);
1632
1633 /* Lastly, free mqd resources.
1634 * Do free_mqd() after dqm_unlock to avoid circular locking.
1635 */
1636 list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
1637 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1638 q->properties.type)];
1639 list_del(&q->list);
1640 qpd->queue_count--;
1641 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1642 }
1643
1644 return retval;
1645}
1646
1647static int init_mqd_managers(struct device_queue_manager *dqm)
1648{
1649 int i, j;
1650 struct mqd_manager *mqd_mgr;
1651
1652 for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
1653 mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
1654 if (!mqd_mgr) {
1655 pr_err("mqd manager [%d] initialization failed\n", i);
1656 goto out_free;
1657 }
1658 dqm->mqd_mgrs[i] = mqd_mgr;
1659 }
1660
1661 return 0;
1662
1663out_free:
1664 for (j = 0; j < i; j++) {
1665 kfree(dqm->mqd_mgrs[j]);
1666 dqm->mqd_mgrs[j] = NULL;
1667 }
1668
1669 return -ENOMEM;
1670}
1671
1672/* Allocate one hiq mqd (HWS) and all SDMA mqd in a continuous trunk*/
1673static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
1674{
1675 int retval;
1676 struct kfd_dev *dev = dqm->dev;
1677 struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
1678 uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
1679 dev->device_info->num_sdma_engines *
1680 dev->device_info->num_sdma_queues_per_engine +
1681 dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
1682
1683 retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
1684 &(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
1685 (void *)&(mem_obj->cpu_ptr), true);
1686
1687 return retval;
1688}
1689
1690struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1691{
1692 struct device_queue_manager *dqm;
1693
1694 pr_debug("Loading device queue manager\n");
1695
1696 dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
1697 if (!dqm)
1698 return NULL;
1699
1700 switch (dev->device_info->asic_family) {
1701 /* HWS is not available on Hawaii. */
1702 case CHIP_HAWAII:
1703 /* HWS depends on CWSR for timely dequeue. CWSR is not
1704 * available on Tonga.
1705 *
1706 * FIXME: This argument also applies to Kaveri.
1707 */
1708 case CHIP_TONGA:
1709 dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
1710 break;
1711 default:
1712 dqm->sched_policy = sched_policy;
1713 break;
1714 }
1715
1716 dqm->dev = dev;
1717 switch (dqm->sched_policy) {
1718 case KFD_SCHED_POLICY_HWS:
1719 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
1720 /* initialize dqm for cp scheduling */
1721 dqm->ops.create_queue = create_queue_cpsch;
1722 dqm->ops.initialize = initialize_cpsch;
1723 dqm->ops.start = start_cpsch;
1724 dqm->ops.stop = stop_cpsch;
1725 dqm->ops.destroy_queue = destroy_queue_cpsch;
1726 dqm->ops.update_queue = update_queue;
1727 dqm->ops.register_process = register_process;
1728 dqm->ops.unregister_process = unregister_process;
1729 dqm->ops.uninitialize = uninitialize;
1730 dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
1731 dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
1732 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1733 dqm->ops.set_trap_handler = set_trap_handler;
1734 dqm->ops.process_termination = process_termination_cpsch;
1735 dqm->ops.evict_process_queues = evict_process_queues_cpsch;
1736 dqm->ops.restore_process_queues = restore_process_queues_cpsch;
1737 dqm->ops.get_wave_state = get_wave_state;
1738 break;
1739 case KFD_SCHED_POLICY_NO_HWS:
1740 /* initialize dqm for no cp scheduling */
1741 dqm->ops.start = start_nocpsch;
1742 dqm->ops.stop = stop_nocpsch;
1743 dqm->ops.create_queue = create_queue_nocpsch;
1744 dqm->ops.destroy_queue = destroy_queue_nocpsch;
1745 dqm->ops.update_queue = update_queue;
1746 dqm->ops.register_process = register_process;
1747 dqm->ops.unregister_process = unregister_process;
1748 dqm->ops.initialize = initialize_nocpsch;
1749 dqm->ops.uninitialize = uninitialize;
1750 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1751 dqm->ops.set_trap_handler = set_trap_handler;
1752 dqm->ops.process_termination = process_termination_nocpsch;
1753 dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
1754 dqm->ops.restore_process_queues =
1755 restore_process_queues_nocpsch;
1756 dqm->ops.get_wave_state = get_wave_state;
1757 break;
1758 default:
1759 pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
1760 goto out_free;
1761 }
1762
1763 switch (dev->device_info->asic_family) {
1764 case CHIP_CARRIZO:
1765 device_queue_manager_init_vi(&dqm->asic_ops);
1766 break;
1767
1768 case CHIP_KAVERI:
1769 device_queue_manager_init_cik(&dqm->asic_ops);
1770 break;
1771
1772 case CHIP_HAWAII:
1773 device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
1774 break;
1775
1776 case CHIP_TONGA:
1777 case CHIP_FIJI:
1778 case CHIP_POLARIS10:
1779 case CHIP_POLARIS11:
1780 case CHIP_POLARIS12:
1781 case CHIP_VEGAM:
1782 device_queue_manager_init_vi_tonga(&dqm->asic_ops);
1783 break;
1784
1785 case CHIP_VEGA10:
1786 case CHIP_VEGA12:
1787 case CHIP_VEGA20:
1788 case CHIP_RAVEN:
1789 case CHIP_ARCTURUS:
1790 device_queue_manager_init_v9(&dqm->asic_ops);
1791 break;
1792 case CHIP_NAVI10:
1793 device_queue_manager_init_v10_navi10(&dqm->asic_ops);
1794 break;
1795 default:
1796 WARN(1, "Unexpected ASIC family %u",
1797 dev->device_info->asic_family);
1798 goto out_free;
1799 }

	if (init_mqd_managers(dqm))
		goto out_free;

	if (allocate_hiq_sdma_mqd(dqm)) {
		pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
		goto out_free;
	}

	if (!dqm->ops.initialize(dqm))
		return dqm;

out_free:
	kfree(dqm);
	return NULL;
}

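/* Free the single GTT buffer that backs the HIQ and SDMA MQD trunk. */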
static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
				    struct kfd_mem_obj *mqd)
{
	WARN(!mqd, "No hiq sdma mqd trunk to free");

	amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem);
}

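/*
 * Counterpart of device_queue_manager_init(): release the scheduler
 * state, free the HIQ/SDMA MQD trunk, then free the DQM itself.
 */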
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	dqm->ops.uninitialize(dqm);
	deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
	kfree(dqm);
}

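/*
 * Evict all queues of the process that triggered a VM fault, identified
 * by its PASID. Returns -EINVAL if no process matches the PASID.
 */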
int kfd_process_vm_fault(struct device_queue_manager *dqm,
			 unsigned int pasid)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	int ret = 0;

	if (!p)
		return -EINVAL;
	pdd = kfd_get_process_device_data(dqm->dev, p);
	if (pdd)
		ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
	kfd_unref_process(p);

	return ret;
}

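/*
 * Deferred work item, scheduled when the hardware scheduler hangs:
 * handle the exception by asking amdgpu to reset the GPU.
 */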
static void kfd_process_hw_exception(struct work_struct *work)
{
	struct device_queue_manager *dqm = container_of(work,
			struct device_queue_manager, hw_exception_work);
	amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
}

#if defined(CONFIG_DEBUG_FS)

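/*
 * Pretty-print a register dump: each run of consecutive register
 * offsets starts a new line headed by the first offset, with up to
 * eight values per line, e.g.
 *
 *	00002000: 00000001 00000000 00000000 00000000 ...
 */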
static void seq_reg_dump(struct seq_file *m,
			 uint32_t (*dump)[2], uint32_t n_regs)
{
	uint32_t i, count;

	for (i = 0, count = 0; i < n_regs; i++) {
		if (count == 0 ||
		    dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
			seq_printf(m, "%s %08x: %08x",
				   i ? "\n" : "",
				   dump[i][0], dump[i][1]);
			count = 7;
		} else {
			seq_printf(m, " %08x", dump[i][1]);
			count--;
		}
	}

	seq_puts(m, "\n");
}

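/*
 * debugfs read handler: dump the HQD registers of the HIQ, of every CP
 * queue reserved for KFD in the queue bitmap, and of every SDMA RLC
 * queue on every SDMA engine.
 */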
int dqm_debugfs_hqds(struct seq_file *m, void *data)
{
	struct device_queue_manager *dqm = data;
	uint32_t (*dump)[2], n_regs;
	int pipe, queue;
	int r = 0;

	r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
					KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
					&dump, &n_regs);
	if (!r) {
		seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n",
			   KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
			   KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
			   KFD_CIK_HIQ_QUEUE);
		seq_reg_dump(m, dump, n_regs);

		kfree(dump);
	}

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
			if (!test_bit(pipe_offset + queue,
				      dqm->dev->shared_resources.cp_queue_bitmap))
				continue;

			r = dqm->dev->kfd2kgd->hqd_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, " CP Pipe %d, Queue %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
		for (queue = 0;
		     queue < dqm->dev->device_info->num_sdma_queues_per_engine;
		     queue++) {
			r = dqm->dev->kfd2kgd->hqd_sdma_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, " SDMA Engine %d, RLC %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	return r;
}

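/*
 * debugfs hook: force the scheduler to unmap and re-map all queues.
 * Setting active_runlist beforehand makes the unmap path run even if no
 * runlist is currently active.
 */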
int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
{
	int r = 0;

	dqm_lock(dqm);
	dqm->active_runlist = true;
	r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	dqm_unlock(dqm);

	return r;
}
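
/*
 * A minimal sketch of how a handler like dqm_debugfs_hqds() could be
 * exposed through debugfs with the seq_file single_open() helper. The
 * real hookup lives elsewhere in the driver; hqds_open and hqds_fops
 * below are illustrative names only, not part of this file:
 *
 *	static int hqds_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, dqm_debugfs_hqds, inode->i_private);
 *	}
 *
 *	static const struct file_operations hqds_fops = {
 *		.owner = THIS_MODULE,
 *		.open = hqds_open,
 *		.read = seq_read,
 *		.llseek = seq_lseek,
 *		.release = single_release,
 *	};
 *
 *	debugfs_create_file("hqds", 0444, parent_dir, dqm, &hqds_fops);
 */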

#endif